Snap for 7088030 from 9eec0c32f8fdb79f3ba74117f2328864b541930c to tm-release

Change-Id: I501e15a1a5813c662ba7846b10f83e1e5c5cd3b3
diff --git a/darwin-x86/bin/cxx_extractor b/darwin-x86/bin/cxx_extractor
index cfc5f02..45f6335 100755
--- a/darwin-x86/bin/cxx_extractor
+++ b/darwin-x86/bin/cxx_extractor
Binary files differ
diff --git a/darwin-x86/bin/header-abi-diff b/darwin-x86/bin/header-abi-diff
index dd35fc7..5888e9b 100755
--- a/darwin-x86/bin/header-abi-diff
+++ b/darwin-x86/bin/header-abi-diff
Binary files differ
diff --git a/darwin-x86/bin/header-abi-dumper b/darwin-x86/bin/header-abi-dumper
index ae81974..87e9d77 100755
--- a/darwin-x86/bin/header-abi-dumper
+++ b/darwin-x86/bin/header-abi-dumper
Binary files differ
diff --git a/darwin-x86/bin/header-abi-linker b/darwin-x86/bin/header-abi-linker
index 14a4de4..b252b2d 100755
--- a/darwin-x86/bin/header-abi-linker
+++ b/darwin-x86/bin/header-abi-linker
Binary files differ
diff --git a/darwin-x86/bin/proto_metadata_plugin b/darwin-x86/bin/proto_metadata_plugin
index 2326396..3569c95 100755
--- a/darwin-x86/bin/proto_metadata_plugin
+++ b/darwin-x86/bin/proto_metadata_plugin
Binary files differ
diff --git a/darwin-x86/bin/protoc_extractor b/darwin-x86/bin/protoc_extractor
index 10b6f60..754c7ee 100755
--- a/darwin-x86/bin/protoc_extractor
+++ b/darwin-x86/bin/protoc_extractor
Binary files differ
diff --git a/darwin-x86/bin/versioner b/darwin-x86/bin/versioner
index 6d45046..2ba335b 100755
--- a/darwin-x86/bin/versioner
+++ b/darwin-x86/bin/versioner
Binary files differ
diff --git a/darwin-x86/clang-headers b/darwin-x86/clang-headers
index e8f09e7..0005fe8 120000
--- a/darwin-x86/clang-headers
+++ b/darwin-x86/clang-headers
@@ -1 +1 @@
-lib64/clang/11.0.5/include
\ No newline at end of file
+lib64/clang/12.0.1/include
\ No newline at end of file
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_math.h b/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_math.h
deleted file mode 100644
index cf7014b..0000000
--- a/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_math.h
+++ /dev/null
@@ -1,1185 +0,0 @@
-/*===---- __clang_hip_math.h - HIP math decls -------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __CLANG_HIP_MATH_H__
-#define __CLANG_HIP_MATH_H__
-
-#include <algorithm>
-#include <limits.h>
-#include <limits>
-#include <stdint.h>
-
-#pragma push_macro("__DEVICE__")
-#pragma push_macro("__RETURN_TYPE")
-
-// To be consistent with __clang_cuda_math_forward_declares.
-#define __DEVICE__ static __device__
-#define __RETURN_TYPE bool
-
-__DEVICE__
-inline uint64_t __make_mantissa_base8(const char *__tagp) {
-  uint64_t __r = 0;
-  while (*__tagp != '\0') {
-    char __tmp = *__tagp;
-
-    if (__tmp >= '0' && __tmp <= '7')
-      __r = (__r * 8u) + __tmp - '0';
-    else
-      return 0;
-
-    ++__tagp;
-  }
-
-  return __r;
-}
-
-__DEVICE__
-inline uint64_t __make_mantissa_base10(const char *__tagp) {
-  uint64_t __r = 0;
-  while (*__tagp != '\0') {
-    char __tmp = *__tagp;
-
-    if (__tmp >= '0' && __tmp <= '9')
-      __r = (__r * 10u) + __tmp - '0';
-    else
-      return 0;
-
-    ++__tagp;
-  }
-
-  return __r;
-}
-
-__DEVICE__
-inline uint64_t __make_mantissa_base16(const char *__tagp) {
-  uint64_t __r = 0;
-  while (*__tagp != '\0') {
-    char __tmp = *__tagp;
-
-    if (__tmp >= '0' && __tmp <= '9')
-      __r = (__r * 16u) + __tmp - '0';
-    else if (__tmp >= 'a' && __tmp <= 'f')
-      __r = (__r * 16u) + __tmp - 'a' + 10;
-    else if (__tmp >= 'A' && __tmp <= 'F')
-      __r = (__r * 16u) + __tmp - 'A' + 10;
-    else
-      return 0;
-
-    ++__tagp;
-  }
-
-  return __r;
-}
-
-__DEVICE__
-inline uint64_t __make_mantissa(const char *__tagp) {
-  if (!__tagp)
-    return 0u;
-
-  if (*__tagp == '0') {
-    ++__tagp;
-
-    if (*__tagp == 'x' || *__tagp == 'X')
-      return __make_mantissa_base16(__tagp + 1); // Skip the 'x'/'X'.
-    else
-      return __make_mantissa_base8(__tagp);
-  }
-
-  return __make_mantissa_base10(__tagp);
-}
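-// Usage sketch: __make_mantissa parses the optional n-char-sequence passed to
-// nan()/nanf(), e.g. __make_mantissa("0x7f") == 0x7f and
-// __make_mantissa("42") == 42; any invalid character makes the helpers return
-// 0. The nan implementations below pack this value into the NaN payload bits.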
-
-// BEGIN FLOAT
-__DEVICE__
-inline float abs(float __x) { return __ocml_fabs_f32(__x); }
-__DEVICE__
-inline float acosf(float __x) { return __ocml_acos_f32(__x); }
-__DEVICE__
-inline float acoshf(float __x) { return __ocml_acosh_f32(__x); }
-__DEVICE__
-inline float asinf(float __x) { return __ocml_asin_f32(__x); }
-__DEVICE__
-inline float asinhf(float __x) { return __ocml_asinh_f32(__x); }
-__DEVICE__
-inline float atan2f(float __x, float __y) { return __ocml_atan2_f32(__x, __y); }
-__DEVICE__
-inline float atanf(float __x) { return __ocml_atan_f32(__x); }
-__DEVICE__
-inline float atanhf(float __x) { return __ocml_atanh_f32(__x); }
-__DEVICE__
-inline float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
-__DEVICE__
-inline float ceilf(float __x) { return __ocml_ceil_f32(__x); }
-__DEVICE__
-inline float copysignf(float __x, float __y) {
-  return __ocml_copysign_f32(__x, __y);
-}
-__DEVICE__
-inline float cosf(float __x) { return __ocml_cos_f32(__x); }
-__DEVICE__
-inline float coshf(float __x) { return __ocml_cosh_f32(__x); }
-__DEVICE__
-inline float cospif(float __x) { return __ocml_cospi_f32(__x); }
-__DEVICE__
-inline float cyl_bessel_i0f(float __x) { return __ocml_i0_f32(__x); }
-__DEVICE__
-inline float cyl_bessel_i1f(float __x) { return __ocml_i1_f32(__x); }
-__DEVICE__
-inline float erfcf(float __x) { return __ocml_erfc_f32(__x); }
-__DEVICE__
-inline float erfcinvf(float __x) { return __ocml_erfcinv_f32(__x); }
-__DEVICE__
-inline float erfcxf(float __x) { return __ocml_erfcx_f32(__x); }
-__DEVICE__
-inline float erff(float __x) { return __ocml_erf_f32(__x); }
-__DEVICE__
-inline float erfinvf(float __x) { return __ocml_erfinv_f32(__x); }
-__DEVICE__
-inline float exp10f(float __x) { return __ocml_exp10_f32(__x); }
-__DEVICE__
-inline float exp2f(float __x) { return __ocml_exp2_f32(__x); }
-__DEVICE__
-inline float expf(float __x) { return __ocml_exp_f32(__x); }
-__DEVICE__
-inline float expm1f(float __x) { return __ocml_expm1_f32(__x); }
-__DEVICE__
-inline float fabsf(float __x) { return __ocml_fabs_f32(__x); }
-__DEVICE__
-inline float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); }
-__DEVICE__
-inline float fdividef(float __x, float __y) { return __x / __y; }
-__DEVICE__
-inline float floorf(float __x) { return __ocml_floor_f32(__x); }
-__DEVICE__
-inline float fmaf(float __x, float __y, float __z) {
-  return __ocml_fma_f32(__x, __y, __z);
-}
-__DEVICE__
-inline float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
-__DEVICE__
-inline float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
-__DEVICE__
-inline float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
-__DEVICE__
-inline float frexpf(float __x, int *__nptr) {
-  int __tmp;
-  float __r =
-      __ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp);
-  *__nptr = __tmp;
-
-  return __r;
-}
-__DEVICE__
-inline float hypotf(float __x, float __y) { return __ocml_hypot_f32(__x, __y); }
-__DEVICE__
-inline int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
-__DEVICE__
-inline __RETURN_TYPE isfinite(float __x) { return __ocml_isfinite_f32(__x); }
-__DEVICE__
-inline __RETURN_TYPE isinf(float __x) { return __ocml_isinf_f32(__x); }
-__DEVICE__
-inline __RETURN_TYPE isnan(float __x) { return __ocml_isnan_f32(__x); }
-__DEVICE__
-inline float j0f(float __x) { return __ocml_j0_f32(__x); }
-__DEVICE__
-inline float j1f(float __x) { return __ocml_j1_f32(__x); }
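-// jnf below (like ynf, jn, and yn later in this header) builds higher orders
-// with the upward Bessel recurrence C_{n+1}(x) = (2n/x) * C_n(x) - C_{n-1}(x),
-// which holds for both J_n and Y_n, starting from the order-0/order-1 values.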
-__DEVICE__
-inline float jnf(int __n,
-                 float __x) { // TODO: we could use Ahmes multiplication
-                              // and the Miller & Brown algorithm
-  //       for linear recurrences to get O(log n) steps, but it's unclear if
-  //       it'd be beneficial in this case.
-  if (__n == 0)
-    return j0f(__x);
-  if (__n == 1)
-    return j1f(__x);
-
-  float __x0 = j0f(__x);
-  float __x1 = j1f(__x);
-  for (int __i = 1; __i < __n; ++__i) {
-    float __x2 = (2 * __i) / __x * __x1 - __x0;
-    __x0 = __x1;
-    __x1 = __x2;
-  }
-
-  return __x1;
-}
-__DEVICE__
-inline float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
-__DEVICE__
-inline float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
-__DEVICE__
-inline long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
-__DEVICE__
-inline long long int llroundf(float __x) { return __ocml_round_f32(__x); }
-__DEVICE__
-inline float log10f(float __x) { return __ocml_log10_f32(__x); }
-__DEVICE__
-inline float log1pf(float __x) { return __ocml_log1p_f32(__x); }
-__DEVICE__
-inline float log2f(float __x) { return __ocml_log2_f32(__x); }
-__DEVICE__
-inline float logbf(float __x) { return __ocml_logb_f32(__x); }
-__DEVICE__
-inline float logf(float __x) { return __ocml_log_f32(__x); }
-__DEVICE__
-inline long int lrintf(float __x) { return __ocml_rint_f32(__x); }
-__DEVICE__
-inline long int lroundf(float __x) { return __ocml_round_f32(__x); }
-__DEVICE__
-inline float modff(float __x, float *__iptr) {
-  float __tmp;
-  float __r =
-      __ocml_modf_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
-  *__iptr = __tmp;
-
-  return __r;
-}
-__DEVICE__
-inline float nanf(const char *__tagp) {
-  union {
-    float val;
-    struct ieee_float {
-      uint32_t mantissa : 22;
-      uint32_t quiet : 1;
-      uint32_t exponent : 8;
-      uint32_t sign : 1;
-    } bits;
-
-    static_assert(sizeof(float) == sizeof(ieee_float), "");
-  } __tmp;
-
-  __tmp.bits.sign = 0u;
-  __tmp.bits.exponent = ~0u;
-  __tmp.bits.quiet = 1u;
-  __tmp.bits.mantissa = __make_mantissa(__tagp);
-
-  return __tmp.val;
-}
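-// Bit layout used by nanf above (IEEE-754 binary32, low bits first): 22
-// payload bits, 1 quiet bit, 8 exponent bits, 1 sign bit. An all-ones
-// exponent with the quiet bit set yields a quiet NaN carrying the parsed tag
-// as its payload.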
-__DEVICE__
-inline float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
-__DEVICE__
-inline float nextafterf(float __x, float __y) {
-  return __ocml_nextafter_f32(__x, __y);
-}
-__DEVICE__
-inline float norm3df(float __x, float __y, float __z) {
-  return __ocml_len3_f32(__x, __y, __z);
-}
-__DEVICE__
-inline float norm4df(float __x, float __y, float __z, float __w) {
-  return __ocml_len4_f32(__x, __y, __z, __w);
-}
-__DEVICE__
-inline float normcdff(float __x) { return __ocml_ncdf_f32(__x); }
-__DEVICE__
-inline float normcdfinvf(float __x) { return __ocml_ncdfinv_f32(__x); }
-__DEVICE__
-inline float
-normf(int __dim,
-      const float *__a) { // TODO: placeholder until OCML adds support.
-  float __r = 0;
-  while (__dim--) {
-    __r += __a[0] * __a[0];
-    ++__a;
-  }
-
-  return __ocml_sqrt_f32(__r);
-}
-__DEVICE__
-inline float powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
-__DEVICE__
-inline float rcbrtf(float __x) { return __ocml_rcbrt_f32(__x); }
-__DEVICE__
-inline float remainderf(float __x, float __y) {
-  return __ocml_remainder_f32(__x, __y);
-}
-__DEVICE__
-inline float remquof(float __x, float __y, int *__quo) {
-  int __tmp;
-  float __r = __ocml_remquo_f32(
-      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
-  *__quo = __tmp;
-
-  return __r;
-}
-__DEVICE__
-inline float rhypotf(float __x, float __y) {
-  return __ocml_rhypot_f32(__x, __y);
-}
-__DEVICE__
-inline float rintf(float __x) { return __ocml_rint_f32(__x); }
-__DEVICE__
-inline float rnorm3df(float __x, float __y, float __z) {
-  return __ocml_rlen3_f32(__x, __y, __z);
-}
-
-__DEVICE__
-inline float rnorm4df(float __x, float __y, float __z, float __w) {
-  return __ocml_rlen4_f32(__x, __y, __z, __w);
-}
-__DEVICE__
-inline float
-rnormf(int __dim,
-       const float *__a) { // TODO: placeholder until OCML adds support.
-  float __r = 0;
-  while (__dim--) {
-    __r += __a[0] * __a[0];
-    ++__a;
-  }
-
-  return __ocml_rsqrt_f32(__r);
-}
-__DEVICE__
-inline float roundf(float __x) { return __ocml_round_f32(__x); }
-__DEVICE__
-inline float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
-__DEVICE__
-inline float scalblnf(float __x, long int __n) {
-  return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n)
-                         : __ocml_scalb_f32(__x, __n);
-}
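-// The INT_MAX guard above presumably exists because __ocml_scalbn_f32 takes
-// an int exponent; for a larger __n the code falls back to __ocml_scalb_f32,
-// which takes the exponent as a float (scalbln below uses the same pattern).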
-__DEVICE__
-inline float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
-__DEVICE__
-inline __RETURN_TYPE signbit(float __x) { return __ocml_signbit_f32(__x); }
-__DEVICE__
-inline void sincosf(float __x, float *__sinptr, float *__cosptr) {
-  float __tmp;
-
-  *__sinptr =
-      __ocml_sincos_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
-  *__cosptr = __tmp;
-}
-__DEVICE__
-inline void sincospif(float __x, float *__sinptr, float *__cosptr) {
-  float __tmp;
-
-  *__sinptr = __ocml_sincospi_f32(
-      __x, (__attribute__((address_space(5))) float *)&__tmp);
-  *__cosptr = __tmp;
-}
-__DEVICE__
-inline float sinf(float __x) { return __ocml_sin_f32(__x); }
-__DEVICE__
-inline float sinhf(float __x) { return __ocml_sinh_f32(__x); }
-__DEVICE__
-inline float sinpif(float __x) { return __ocml_sinpi_f32(__x); }
-__DEVICE__
-inline float sqrtf(float __x) { return __ocml_sqrt_f32(__x); }
-__DEVICE__
-inline float tanf(float __x) { return __ocml_tan_f32(__x); }
-__DEVICE__
-inline float tanhf(float __x) { return __ocml_tanh_f32(__x); }
-__DEVICE__
-inline float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
-__DEVICE__
-inline float truncf(float __x) { return __ocml_trunc_f32(__x); }
-__DEVICE__
-inline float y0f(float __x) { return __ocml_y0_f32(__x); }
-__DEVICE__
-inline float y1f(float __x) { return __ocml_y1_f32(__x); }
-__DEVICE__
-inline float ynf(int __n,
-                 float __x) { // TODO: we could use Ahmes multiplication
-                              // and the Miller & Brown algorithm
-  //       for linear recurrences to get O(log n) steps, but it's unclear if
-  //       it'd be beneficial in this case. Placeholder until OCML adds
-  //       support.
-  if (__n == 0)
-    return y0f(__x);
-  if (__n == 1)
-    return y1f(__x);
-
-  float __x0 = y0f(__x);
-  float __x1 = y1f(__x);
-  for (int __i = 1; __i < __n; ++__i) {
-    float __x2 = (2 * __i) / __x * __x1 - __x0;
-    __x0 = __x1;
-    __x1 = __x2;
-  }
-
-  return __x1;
-}
-
-// BEGIN INTRINSICS
-__DEVICE__
-inline float __cosf(float __x) { return __ocml_native_cos_f32(__x); }
-__DEVICE__
-inline float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); }
-__DEVICE__
-inline float __expf(float __x) { return __ocml_native_exp_f32(__x); }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fadd_rd(float __x, float __y) {
-  return __ocml_add_rtn_f32(__x, __y);
-}
-#endif
-__DEVICE__
-inline float __fadd_rn(float __x, float __y) { return __x + __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fadd_ru(float __x, float __y) {
-  return __ocml_add_rtp_f32(__x, __y);
-}
-__DEVICE__
-inline float __fadd_rz(float __x, float __y) {
-  return __ocml_add_rtz_f32(__x, __y);
-}
-__DEVICE__
-inline float __fdiv_rd(float __x, float __y) {
-  return __ocml_div_rtn_f32(__x, __y);
-}
-#endif
-__DEVICE__
-inline float __fdiv_rn(float __x, float __y) { return __x / __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fdiv_ru(float __x, float __y) {
-  return __ocml_div_rtp_f32(__x, __y);
-}
-__DEVICE__
-inline float __fdiv_rz(float __x, float __y) {
-  return __ocml_div_rtz_f32(__x, __y);
-}
-#endif
-__DEVICE__
-inline float __fdividef(float __x, float __y) { return __x / __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fmaf_rd(float __x, float __y, float __z) {
-  return __ocml_fma_rtn_f32(__x, __y, __z);
-}
-#endif
-__DEVICE__
-inline float __fmaf_rn(float __x, float __y, float __z) {
-  return __ocml_fma_f32(__x, __y, __z);
-}
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fmaf_ru(float __x, float __y, float __z) {
-  return __ocml_fma_rtp_f32(__x, __y, __z);
-}
-__DEVICE__
-inline float __fmaf_rz(float __x, float __y, float __z) {
-  return __ocml_fma_rtz_f32(__x, __y, __z);
-}
-__DEVICE__
-inline float __fmul_rd(float __x, float __y) {
-  return __ocml_mul_rtn_f32(__x, __y);
-}
-#endif
-__DEVICE__
-inline float __fmul_rn(float __x, float __y) { return __x * __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fmul_ru(float __x, float __y) {
-  return __ocml_mul_rtp_f32(__x, __y);
-}
-__DEVICE__
-inline float __fmul_rz(float __x, float __y) {
-  return __ocml_mul_rtz_f32(__x, __y);
-}
-__DEVICE__
-inline float __frcp_rd(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
-#endif
-__DEVICE__
-inline float __frcp_rn(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __frcp_ru(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
-__DEVICE__
-inline float __frcp_rz(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
-#endif
-__DEVICE__
-inline float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }
-#endif
-__DEVICE__
-inline float __fsqrt_rn(float __x) { return __ocml_native_sqrt_f32(__x); }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }
-__DEVICE__
-inline float __fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); }
-__DEVICE__
-inline float __fsub_rd(float __x, float __y) {
-  return __ocml_sub_rtn_f32(__x, __y);
-}
-#endif
-__DEVICE__
-inline float __fsub_rn(float __x, float __y) { return __x - __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fsub_ru(float __x, float __y) {
-  return __ocml_sub_rtp_f32(__x, __y);
-}
-__DEVICE__
-inline float __fsub_rz(float __x, float __y) {
-  return __ocml_sub_rtz_f32(__x, __y);
-}
-#endif
-__DEVICE__
-inline float __log10f(float __x) { return __ocml_native_log10_f32(__x); }
-__DEVICE__
-inline float __log2f(float __x) { return __ocml_native_log2_f32(__x); }
-__DEVICE__
-inline float __logf(float __x) { return __ocml_native_log_f32(__x); }
-__DEVICE__
-inline float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
-__DEVICE__
-inline float __saturatef(float __x) {
-  return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x);
-}
-__DEVICE__
-inline void __sincosf(float __x, float *__sinptr, float *__cosptr) {
-  *__sinptr = __ocml_native_sin_f32(__x);
-  *__cosptr = __ocml_native_cos_f32(__x);
-}
-__DEVICE__
-inline float __sinf(float __x) { return __ocml_native_sin_f32(__x); }
-__DEVICE__
-inline float __tanf(float __x) { return __ocml_tan_f32(__x); }
-// END INTRINSICS
-// END FLOAT
-
-// BEGIN DOUBLE
-__DEVICE__
-inline double abs(double __x) { return __ocml_fabs_f64(__x); }
-__DEVICE__
-inline double acos(double __x) { return __ocml_acos_f64(__x); }
-__DEVICE__
-inline double acosh(double __x) { return __ocml_acosh_f64(__x); }
-__DEVICE__
-inline double asin(double __x) { return __ocml_asin_f64(__x); }
-__DEVICE__
-inline double asinh(double __x) { return __ocml_asinh_f64(__x); }
-__DEVICE__
-inline double atan(double __x) { return __ocml_atan_f64(__x); }
-__DEVICE__
-inline double atan2(double __x, double __y) {
-  return __ocml_atan2_f64(__x, __y);
-}
-__DEVICE__
-inline double atanh(double __x) { return __ocml_atanh_f64(__x); }
-__DEVICE__
-inline double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
-__DEVICE__
-inline double ceil(double __x) { return __ocml_ceil_f64(__x); }
-__DEVICE__
-inline double copysign(double __x, double __y) {
-  return __ocml_copysign_f64(__x, __y);
-}
-__DEVICE__
-inline double cos(double __x) { return __ocml_cos_f64(__x); }
-__DEVICE__
-inline double cosh(double __x) { return __ocml_cosh_f64(__x); }
-__DEVICE__
-inline double cospi(double __x) { return __ocml_cospi_f64(__x); }
-__DEVICE__
-inline double cyl_bessel_i0(double __x) { return __ocml_i0_f64(__x); }
-__DEVICE__
-inline double cyl_bessel_i1(double __x) { return __ocml_i1_f64(__x); }
-__DEVICE__
-inline double erf(double __x) { return __ocml_erf_f64(__x); }
-__DEVICE__
-inline double erfc(double __x) { return __ocml_erfc_f64(__x); }
-__DEVICE__
-inline double erfcinv(double __x) { return __ocml_erfcinv_f64(__x); }
-__DEVICE__
-inline double erfcx(double __x) { return __ocml_erfcx_f64(__x); }
-__DEVICE__
-inline double erfinv(double __x) { return __ocml_erfinv_f64(__x); }
-__DEVICE__
-inline double exp(double __x) { return __ocml_exp_f64(__x); }
-__DEVICE__
-inline double exp10(double __x) { return __ocml_exp10_f64(__x); }
-__DEVICE__
-inline double exp2(double __x) { return __ocml_exp2_f64(__x); }
-__DEVICE__
-inline double expm1(double __x) { return __ocml_expm1_f64(__x); }
-__DEVICE__
-inline double fabs(double __x) { return __ocml_fabs_f64(__x); }
-__DEVICE__
-inline double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
-__DEVICE__
-inline double floor(double __x) { return __ocml_floor_f64(__x); }
-__DEVICE__
-inline double fma(double __x, double __y, double __z) {
-  return __ocml_fma_f64(__x, __y, __z);
-}
-__DEVICE__
-inline double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
-__DEVICE__
-inline double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
-__DEVICE__
-inline double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
-__DEVICE__
-inline double frexp(double __x, int *__nptr) {
-  int __tmp;
-  double __r =
-      __ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp);
-  *__nptr = __tmp;
-
-  return __r;
-}
-__DEVICE__
-inline double hypot(double __x, double __y) {
-  return __ocml_hypot_f64(__x, __y);
-}
-__DEVICE__
-inline int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
-__DEVICE__
-inline __RETURN_TYPE isfinite(double __x) { return __ocml_isfinite_f64(__x); }
-__DEVICE__
-inline __RETURN_TYPE isinf(double __x) { return __ocml_isinf_f64(__x); }
-__DEVICE__
-inline __RETURN_TYPE isnan(double __x) { return __ocml_isnan_f64(__x); }
-__DEVICE__
-inline double j0(double __x) { return __ocml_j0_f64(__x); }
-__DEVICE__
-inline double j1(double __x) { return __ocml_j1_f64(__x); }
-__DEVICE__
-inline double jn(int __n,
-                 double __x) { // TODO: we could use Ahmes multiplication
-                               // and the Miller & Brown algorithm
-  //       for linear recurrences to get O(log n) steps, but it's unclear if
-  //       it'd be beneficial in this case. Placeholder until OCML adds
-  //       support.
-  if (__n == 0)
-    return j0(__x);
-  if (__n == 1)
-    return j1(__x);
-
-  double __x0 = j0(__x);
-  double __x1 = j1(__x);
-  for (int __i = 1; __i < __n; ++__i) {
-    double __x2 = (2 * __i) / __x * __x1 - __x0;
-    __x0 = __x1;
-    __x1 = __x2;
-  }
-
-  return __x1;
-}
-__DEVICE__
-inline double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
-__DEVICE__
-inline double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
-__DEVICE__
-inline long long int llrint(double __x) { return __ocml_rint_f64(__x); }
-__DEVICE__
-inline long long int llround(double __x) { return __ocml_round_f64(__x); }
-__DEVICE__
-inline double log(double __x) { return __ocml_log_f64(__x); }
-__DEVICE__
-inline double log10(double __x) { return __ocml_log10_f64(__x); }
-__DEVICE__
-inline double log1p(double __x) { return __ocml_log1p_f64(__x); }
-__DEVICE__
-inline double log2(double __x) { return __ocml_log2_f64(__x); }
-__DEVICE__
-inline double logb(double __x) { return __ocml_logb_f64(__x); }
-__DEVICE__
-inline long int lrint(double __x) { return __ocml_rint_f64(__x); }
-__DEVICE__
-inline long int lround(double __x) { return __ocml_round_f64(__x); }
-__DEVICE__
-inline double modf(double __x, double *__iptr) {
-  double __tmp;
-  double __r =
-      __ocml_modf_f64(__x, (__attribute__((address_space(5))) double *)&__tmp);
-  *__iptr = __tmp;
-
-  return __r;
-}
-__DEVICE__
-inline double nan(const char *__tagp) {
-#if !_WIN32
-  union {
-    double val;
-    struct ieee_double {
-      uint64_t mantissa : 51;
-      uint32_t quiet : 1;
-      uint32_t exponent : 11;
-      uint32_t sign : 1;
-    } bits;
-    static_assert(sizeof(double) == sizeof(ieee_double), "");
-  } __tmp;
-
-  __tmp.bits.sign = 0u;
-  __tmp.bits.exponent = ~0u;
-  __tmp.bits.quiet = 1u;
-  __tmp.bits.mantissa = __make_mantissa(__tagp);
-
-  return __tmp.val;
-#else
-  static_assert(sizeof(uint64_t) == sizeof(double), "");
-  uint64_t val = __make_mantissa(__tagp);
-  val |= 0xFFFull << 51; // All-ones exponent plus the quiet bit.
-  return *reinterpret_cast<double *>(&val);
-#endif
-}
-__DEVICE__
-inline double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
-__DEVICE__
-inline double nextafter(double __x, double __y) {
-  return __ocml_nextafter_f64(__x, __y);
-}
-__DEVICE__
-inline double
-norm(int __dim,
-     const double *__a) { // TODO: placeholder until OCML adds support.
-  double __r = 0;
-  while (__dim--) {
-    __r += __a[0] * __a[0];
-    ++__a;
-  }
-
-  return __ocml_sqrt_f64(__r);
-}
-__DEVICE__
-inline double norm3d(double __x, double __y, double __z) {
-  return __ocml_len3_f64(__x, __y, __z);
-}
-__DEVICE__
-inline double norm4d(double __x, double __y, double __z, double __w) {
-  return __ocml_len4_f64(__x, __y, __z, __w);
-}
-__DEVICE__
-inline double normcdf(double __x) { return __ocml_ncdf_f64(__x); }
-__DEVICE__
-inline double normcdfinv(double __x) { return __ocml_ncdfinv_f64(__x); }
-__DEVICE__
-inline double pow(double __x, double __y) { return __ocml_pow_f64(__x, __y); }
-__DEVICE__
-inline double rcbrt(double __x) { return __ocml_rcbrt_f64(__x); }
-__DEVICE__
-inline double remainder(double __x, double __y) {
-  return __ocml_remainder_f64(__x, __y);
-}
-__DEVICE__
-inline double remquo(double __x, double __y, int *__quo) {
-  int __tmp;
-  double __r = __ocml_remquo_f64(
-      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
-  *__quo = __tmp;
-
-  return __r;
-}
-__DEVICE__
-inline double rhypot(double __x, double __y) {
-  return __ocml_rhypot_f64(__x, __y);
-}
-__DEVICE__
-inline double rint(double __x) { return __ocml_rint_f64(__x); }
-__DEVICE__
-inline double
-rnorm(int __dim,
-      const double *__a) { // TODO: placeholder until OCML adds support.
-  double __r = 0;
-  while (__dim--) {
-    __r += __a[0] * __a[0];
-    ++__a;
-  }
-
-  return __ocml_rsqrt_f64(__r);
-}
-__DEVICE__
-inline double rnorm3d(double __x, double __y, double __z) {
-  return __ocml_rlen3_f64(__x, __y, __z);
-}
-__DEVICE__
-inline double rnorm4d(double __x, double __y, double __z, double __w) {
-  return __ocml_rlen4_f64(__x, __y, __z, __w);
-}
-__DEVICE__
-inline double round(double __x) { return __ocml_round_f64(__x); }
-__DEVICE__
-inline double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
-__DEVICE__
-inline double scalbln(double __x, long int __n) {
-  return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n)
-                         : __ocml_scalb_f64(__x, __n);
-}
-__DEVICE__
-inline double scalbn(double __x, int __n) {
-  return __ocml_scalbn_f64(__x, __n);
-}
-__DEVICE__
-inline __RETURN_TYPE signbit(double __x) { return __ocml_signbit_f64(__x); }
-__DEVICE__
-inline double sin(double __x) { return __ocml_sin_f64(__x); }
-__DEVICE__
-inline void sincos(double __x, double *__sinptr, double *__cosptr) {
-  double __tmp;
-  *__sinptr = __ocml_sincos_f64(
-      __x, (__attribute__((address_space(5))) double *)&__tmp);
-  *__cosptr = __tmp;
-}
-__DEVICE__
-inline void sincospi(double __x, double *__sinptr, double *__cosptr) {
-  double __tmp;
-  *__sinptr = __ocml_sincospi_f64(
-      __x, (__attribute__((address_space(5))) double *)&__tmp);
-  *__cosptr = __tmp;
-}
-__DEVICE__
-inline double sinh(double __x) { return __ocml_sinh_f64(__x); }
-__DEVICE__
-inline double sinpi(double __x) { return __ocml_sinpi_f64(__x); }
-__DEVICE__
-inline double sqrt(double __x) { return __ocml_sqrt_f64(__x); }
-__DEVICE__
-inline double tan(double __x) { return __ocml_tan_f64(__x); }
-__DEVICE__
-inline double tanh(double __x) { return __ocml_tanh_f64(__x); }
-__DEVICE__
-inline double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
-__DEVICE__
-inline double trunc(double __x) { return __ocml_trunc_f64(__x); }
-__DEVICE__
-inline double y0(double __x) { return __ocml_y0_f64(__x); }
-__DEVICE__
-inline double y1(double __x) { return __ocml_y1_f64(__x); }
-__DEVICE__
-inline double yn(int __n,
-                 double __x) { // TODO: we could use Ahmes multiplication
-                               // and the Miller & Brown algorithm
-  //       for linear recurrences to get O(log n) steps, but it's unclear if
-  //       it'd be beneficial in this case. Placeholder until OCML adds
-  //       support.
-  if (__n == 0)
-    return y0(__x);
-  if (__n == 1)
-    return y1(__x);
-
-  double __x0 = y0(__x);
-  double __x1 = y1(__x);
-  for (int __i = 1; __i < __n; ++__i) {
-    double __x2 = (2 * __i) / __x * __x1 - __x0;
-    __x0 = __x1;
-    __x1 = __x2;
-  }
-
-  return __x1;
-}
-
-// BEGIN INTRINSICS
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __dadd_rd(double __x, double __y) {
-  return __ocml_add_rtn_f64(__x, __y);
-}
-#endif
-__DEVICE__
-inline double __dadd_rn(double __x, double __y) { return __x + __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __dadd_ru(double __x, double __y) {
-  return __ocml_add_rtp_f64(__x, __y);
-}
-__DEVICE__
-inline double __dadd_rz(double __x, double __y) {
-  return __ocml_add_rtz_f64(__x, __y);
-}
-__DEVICE__
-inline double __ddiv_rd(double __x, double __y) {
-  return __ocml_div_rtn_f64(__x, __y);
-}
-#endif
-__DEVICE__
-inline double __ddiv_rn(double __x, double __y) { return __x / __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __ddiv_ru(double __x, double __y) {
-  return __ocml_div_rtp_f64(__x, __y);
-}
-__DEVICE__
-inline double __ddiv_rz(double __x, double __y) {
-  return __ocml_div_rtz_f64(__x, __y);
-}
-__DEVICE__
-inline double __dmul_rd(double __x, double __y) {
-  return __ocml_mul_rtn_f64(__x, __y);
-}
-#endif
-__DEVICE__
-inline double __dmul_rn(double __x, double __y) { return __x * __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __dmul_ru(double __x, double __y) {
-  return __ocml_mul_rtp_f64(__x, __y);
-}
-__DEVICE__
-inline double __dmul_rz(double __x, double __y) {
-  return __ocml_mul_rtz_f64(__x, __y);
-}
-__DEVICE__
-inline double __drcp_rd(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
-#endif
-__DEVICE__
-inline double __drcp_rn(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __drcp_ru(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
-__DEVICE__
-inline double __drcp_rz(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
-__DEVICE__
-inline double __dsqrt_rd(double __x) { return __ocml_sqrt_rtn_f64(__x); }
-#endif
-__DEVICE__
-inline double __dsqrt_rn(double __x) { return __ocml_sqrt_f64(__x); }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __dsqrt_ru(double __x) { return __ocml_sqrt_rtp_f64(__x); }
-__DEVICE__
-inline double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); }
-__DEVICE__
-inline double __dsub_rd(double __x, double __y) {
-  return __ocml_sub_rtn_f64(__x, __y);
-}
-#endif
-__DEVICE__
-inline double __dsub_rn(double __x, double __y) { return __x - __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __dsub_ru(double __x, double __y) {
-  return __ocml_sub_rtp_f64(__x, __y);
-}
-__DEVICE__
-inline double __dsub_rz(double __x, double __y) {
-  return __ocml_sub_rtz_f64(__x, __y);
-}
-__DEVICE__
-inline double __fma_rd(double __x, double __y, double __z) {
-  return __ocml_fma_rtn_f64(__x, __y, __z);
-}
-#endif
-__DEVICE__
-inline double __fma_rn(double __x, double __y, double __z) {
-  return __ocml_fma_f64(__x, __y, __z);
-}
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __fma_ru(double __x, double __y, double __z) {
-  return __ocml_fma_rtp_f64(__x, __y, __z);
-}
-__DEVICE__
-inline double __fma_rz(double __x, double __y, double __z) {
-  return __ocml_fma_rtz_f64(__x, __y, __z);
-}
-#endif
-// END INTRINSICS
-// END DOUBLE
-
-// BEGIN INTEGER
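-// The abs/labs/llabs implementations below use a branchless idiom: __sgn is
-// 0 for non-negative __x and -1 (all one bits) otherwise, so
-// (__x ^ __sgn) - __sgn negates __x exactly when it is negative.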
-__DEVICE__
-inline int abs(int __x) {
-  int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
-  return (__x ^ __sgn) - __sgn;
-}
-__DEVICE__
-inline long labs(long __x) {
-  long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
-  return (__x ^ __sgn) - __sgn;
-}
-__DEVICE__
-inline long long llabs(long long __x) {
-  long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
-  return (__x ^ __sgn) - __sgn;
-}
-
-#if defined(__cplusplus)
-__DEVICE__
-inline long abs(long __x) { return labs(__x); }
-__DEVICE__
-inline long long abs(long long __x) { return llabs(__x); }
-#endif
-// END INTEGER
-
-__DEVICE__
-inline _Float16 fma(_Float16 __x, _Float16 __y, _Float16 __z) {
-  return __ocml_fma_f16(__x, __y, __z);
-}
-
-__DEVICE__
-inline float fma(float __x, float __y, float __z) {
-  return fmaf(__x, __y, __z);
-}
-
-#pragma push_macro("__DEF_FUN1")
-#pragma push_macro("__DEF_FUN2")
-#pragma push_macro("__DEF_FUNI")
-#pragma push_macro("__DEF_FLOAT_FUN2I")
-#pragma push_macro("__HIP_OVERLOAD1")
-#pragma push_macro("__HIP_OVERLOAD2")
-
-// __hip_enable_if::type is a type function which returns __T if __B is true.
-template <bool __B, class __T = void> struct __hip_enable_if {};
-
-template <class __T> struct __hip_enable_if<true, __T> { typedef __T type; };
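-// When __B is false there is no ::type member, so substitution fails and the
-// candidate silently drops out of overload resolution (SFINAE); when __B is
-// true, ::type is __T and the overload participates.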
-
-// __HIP_OVERLOAD1 is used to resolve function calls with an integer argument
-// to avoid compilation error due to ambiguity, e.g. floor(5) is resolved with
-// floor(double).
-#define __HIP_OVERLOAD1(__retty, __fn)                                         \
-  template <typename __T>                                                      \
-  __DEVICE__ typename __hip_enable_if<std::numeric_limits<__T>::is_integer,    \
-                                      __retty>::type                           \
-  __fn(__T __x) {                                                              \
-    return ::__fn((double)__x);                                                \
-  }
-
-// __HIP_OVERLOAD2 is used to resolve function calls with mixed float/double
-// or integer arguments to avoid compilation error due to ambiguity, e.g.
-// max(5.0f, 6.0) is resolved with max(double, double).
-#define __HIP_OVERLOAD2(__retty, __fn)                                         \
-  template <typename __T1, typename __T2>                                      \
-  __DEVICE__                                                                   \
-      typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&    \
-                                   std::numeric_limits<__T2>::is_specialized,  \
-                               __retty>::type                                  \
-      __fn(__T1 __x, __T2 __y) {                                               \
-    return __fn((double)__x, (double)__y);                                     \
-  }
-
-// Define cmath functions that take a float argument and return float.
-#define __DEF_FUN1(__retty, __func)                                            \
-  __DEVICE__                                                                   \
-  inline float __func(float __x) { return __func##f(__x); }                    \
-  __HIP_OVERLOAD1(__retty, __func)
-
-// Define cmath functions that take a float argument and return __retty.
-#define __DEF_FUNI(__retty, __func)                                            \
-  __DEVICE__                                                                   \
-  inline __retty __func(float __x) { return __func##f(__x); }                  \
-  __HIP_OVERLOAD1(__retty, __func)
-
-// Define cmath functions that take two float arguments.
-#define __DEF_FUN2(__retty, __func)                                            \
-  __DEVICE__                                                                   \
-  inline float __func(float __x, float __y) { return __func##f(__x, __y); }    \
-  __HIP_OVERLOAD2(__retty, __func)
-
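-// For illustration, __DEF_FUN2(double, fmod) expands to roughly:
-//   inline float fmod(float __x, float __y) { return fmodf(__x, __y); }
-// plus the __HIP_OVERLOAD2(double, fmod) template defined above.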
-__DEF_FUN1(double, acos)
-__DEF_FUN1(double, acosh)
-__DEF_FUN1(double, asin)
-__DEF_FUN1(double, asinh)
-__DEF_FUN1(double, atan)
-__DEF_FUN2(double, atan2)
-__DEF_FUN1(double, atanh)
-__DEF_FUN1(double, cbrt)
-__DEF_FUN1(double, ceil)
-__DEF_FUN2(double, copysign)
-__DEF_FUN1(double, cos)
-__DEF_FUN1(double, cosh)
-__DEF_FUN1(double, erf)
-__DEF_FUN1(double, erfc)
-__DEF_FUN1(double, exp)
-__DEF_FUN1(double, exp2)
-__DEF_FUN1(double, expm1)
-__DEF_FUN1(double, fabs)
-__DEF_FUN2(double, fdim)
-__DEF_FUN1(double, floor)
-__DEF_FUN2(double, fmax)
-__DEF_FUN2(double, fmin)
-__DEF_FUN2(double, fmod)
-//__HIP_OVERLOAD1(int, fpclassify)
-__DEF_FUN2(double, hypot)
-__DEF_FUNI(int, ilogb)
-__HIP_OVERLOAD1(bool, isfinite)
-__HIP_OVERLOAD2(bool, isgreater)
-__HIP_OVERLOAD2(bool, isgreaterequal)
-__HIP_OVERLOAD1(bool, isinf)
-__HIP_OVERLOAD2(bool, isless)
-__HIP_OVERLOAD2(bool, islessequal)
-__HIP_OVERLOAD2(bool, islessgreater)
-__HIP_OVERLOAD1(bool, isnan)
-//__HIP_OVERLOAD1(bool, isnormal)
-__HIP_OVERLOAD2(bool, isunordered)
-__DEF_FUN1(double, lgamma)
-__DEF_FUN1(double, log)
-__DEF_FUN1(double, log10)
-__DEF_FUN1(double, log1p)
-__DEF_FUN1(double, log2)
-__DEF_FUN1(double, logb)
-__DEF_FUNI(long long, llrint)
-__DEF_FUNI(long long, llround)
-__DEF_FUNI(long, lrint)
-__DEF_FUNI(long, lround)
-__DEF_FUN1(double, nearbyint)
-__DEF_FUN2(double, nextafter)
-__DEF_FUN2(double, pow)
-__DEF_FUN2(double, remainder)
-__DEF_FUN1(double, rint)
-__DEF_FUN1(double, round)
-__HIP_OVERLOAD1(bool, signbit)
-__DEF_FUN1(double, sin)
-__DEF_FUN1(double, sinh)
-__DEF_FUN1(double, sqrt)
-__DEF_FUN1(double, tan)
-__DEF_FUN1(double, tanh)
-__DEF_FUN1(double, tgamma)
-__DEF_FUN1(double, trunc)
-
-// Define cmath functions that take a float and an integer argument.
-#define __DEF_FLOAT_FUN2I(__func)                                              \
-  __DEVICE__                                                                   \
-  inline float __func(float __x, int __y) { return __func##f(__x, __y); }
-__DEF_FLOAT_FUN2I(scalbn)
-
-template <class T> __DEVICE__ inline T min(T __arg1, T __arg2) {
-  return (__arg1 < __arg2) ? __arg1 : __arg2;
-}
-
-template <class T> __DEVICE__ inline T max(T __arg1, T __arg2) {
-  return (__arg1 > __arg2) ? __arg1 : __arg2;
-}
-
-__DEVICE__ inline int min(int __arg1, int __arg2) {
-  return (__arg1 < __arg2) ? __arg1 : __arg2;
-}
-__DEVICE__ inline int max(int __arg1, int __arg2) {
-  return (__arg1 > __arg2) ? __arg1 : __arg2;
-}
-
-__DEVICE__
-inline float max(float __x, float __y) { return fmaxf(__x, __y); }
-
-__DEVICE__
-inline double max(double __x, double __y) { return fmax(__x, __y); }
-
-__DEVICE__
-inline float min(float __x, float __y) { return fminf(__x, __y); }
-
-__DEVICE__
-inline double min(double __x, double __y) { return fmin(__x, __y); }
-
-__HIP_OVERLOAD2(double, max)
-__HIP_OVERLOAD2(double, min)
-
-__host__ inline static int min(int __arg1, int __arg2) {
-  return std::min(__arg1, __arg2);
-}
-
-__host__ inline static int max(int __arg1, int __arg2) {
-  return std::max(__arg1, __arg2);
-}
-
-#pragma pop_macro("__DEF_FUN1")
-#pragma pop_macro("__DEF_FUN2")
-#pragma pop_macro("__DEF_FUNI")
-#pragma pop_macro("__DEF_FLOAT_FUN2I")
-#pragma pop_macro("__HIP_OVERLOAD1")
-#pragma pop_macro("__HIP_OVERLOAD2")
-#pragma pop_macro("__DEVICE__")
-#pragma pop_macro("__RETURN_TYPE")
-
-#endif // __CLANG_HIP_MATH_H__
diff --git a/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex b/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex
deleted file mode 100644
index 1ed0b14..0000000
--- a/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex
+++ /dev/null
@@ -1,25 +0,0 @@
-/*===-- complex --- OpenMP complex wrapper for target regions --------- c++ -===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __CLANG_OPENMP_COMPLEX__
-#define __CLANG_OPENMP_COMPLEX__
-
-#ifndef _OPENMP
-#error "This file is for OpenMP compilation only."
-#endif
-
-// We require std::math functions in the complex builtins below.
-#include <cmath>
-
-#define __CUDA__
-#include <__clang_cuda_complex_builtins.h>
-#endif
-
-// Grab the host header too.
-#include_next <complex>
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_builtin_vars.h b/darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_builtin_vars.h
similarity index 95%
rename from darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_builtin_vars.h
rename to darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_builtin_vars.h
index 2ba1521..412e823 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_builtin_vars.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_builtin_vars.h
@@ -55,7 +55,9 @@
   __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_tid_z());
   // threadIdx should be convertible to uint3 (in fact in nvcc, it *is* a
   // uint3).  This function is defined after we pull in vector_types.h.
+  __attribute__((device)) operator dim3() const;
   __attribute__((device)) operator uint3() const;
+
 private:
   __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_threadIdx_t);
 };
@@ -66,7 +68,9 @@
   __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_ctaid_z());
   // blockIdx should be convertible to uint3 (in fact in nvcc, it *is* a
   // uint3).  This function is defined after we pull in vector_types.h.
+  __attribute__((device)) operator dim3() const;
   __attribute__((device)) operator uint3() const;
+
 private:
   __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockIdx_t);
 };
@@ -78,6 +82,8 @@
   // blockDim should be convertible to dim3 (in fact in nvcc, it *is* a
   // dim3).  This function is defined after we pull in vector_types.h.
   __attribute__((device)) operator dim3() const;
+  __attribute__((device)) operator uint3() const;
+
 private:
   __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockDim_t);
 };
@@ -89,6 +95,8 @@
   // gridDim should be convertible to dim3 (in fact in nvcc, it *is* a
   // dim3).  This function is defined after we pull in vector_types.h.
   __attribute__((device)) operator dim3() const;
+  __attribute__((device)) operator uint3() const;
+
 private:
   __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_gridDim_t);
 };
@@ -108,5 +116,6 @@
 #undef __CUDA_DEVICE_BUILTIN
 #undef __CUDA_BUILTIN_VAR
 #undef __CUDA_DISALLOW_BUILTINVAR_ACCESS
+#undef __DELETE
 
 #endif /* __CUDA_BUILTIN_VARS_H */
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_cmath.h b/darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_cmath.h
similarity index 90%
rename from darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_cmath.h
rename to darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_cmath.h
index 8ba1826..f49463d 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_cmath.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_cmath.h
@@ -66,10 +66,38 @@
 }
 
 // For inscrutable reasons, the CUDA headers define these functions for us on
-// Windows. For OpenMP we omit these as some old system headers have
-// non-conforming `isinf(float)` and `isnan(float)` implementations that return
-// an `int`. The system versions of these functions should be fine anyway.
-#if !defined(_MSC_VER) && !defined(__OPENMP_NVPTX__)
+// Windows.
+#if !defined(_MSC_VER) || defined(__OPENMP_NVPTX__)
+
+// For OpenMP we work around some old system headers that have non-conforming
+// `isinf(float)` and `isnan(float)` implementations that return an `int`. We do
+// this by providing two versions of these functions, differing only in the
+// return type. To avoid conflicting definitions we disable implicit base
+// function generation. That means we will end up with two specializations, one
+// per type, but only one has a base function defined by the system header.
+#if defined(__OPENMP_NVPTX__)
+#pragma omp begin declare variant match(                                       \
+    implementation = {extension(disable_implicit_base)})
+
+// FIXME: We lack an extension to customize the mangling of the variants, e.g.,
+//        add a suffix. This means we would clash with the names of the variants
+//        (note that we do not create implicit base functions here). To avoid
+//        this clash we add a new trait to some of them that is always true
+//        (this is LLVM after all ;)). It will only influence the mangled name
+//        of the variants inside the inner region and avoid the clash.
+#pragma omp begin declare variant match(implementation = {vendor(llvm)})
+
+__DEVICE__ int isinf(float __x) { return ::__isinff(__x); }
+__DEVICE__ int isinf(double __x) { return ::__isinf(__x); }
+__DEVICE__ int isfinite(float __x) { return ::__finitef(__x); }
+__DEVICE__ int isfinite(double __x) { return ::__isfinited(__x); }
+__DEVICE__ int isnan(float __x) { return ::__isnanf(__x); }
+__DEVICE__ int isnan(double __x) { return ::__isnan(__x); }
+
+#pragma omp end declare variant
+
+#endif
+
 __DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
 __DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
 __DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
@@ -79,6 +107,11 @@
 __DEVICE__ bool isfinite(double __x) { return ::__isfinited(__x); }
 __DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); }
 __DEVICE__ bool isnan(double __x) { return ::__isnan(__x); }
+
+#if defined(__OPENMP_NVPTX__)
+#pragma omp end declare variant
+#endif
+
 #endif
 
 __DEVICE__ bool isgreater(float __x, float __y) {
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_complex_builtins.h b/darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_complex_builtins.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_complex_builtins.h
rename to darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_complex_builtins.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_device_functions.h b/darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_device_functions.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_device_functions.h
rename to darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_device_functions.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_intrinsics.h b/darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_intrinsics.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_intrinsics.h
rename to darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_intrinsics.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_libdevice_declares.h b/darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_libdevice_declares.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_libdevice_declares.h
rename to darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_libdevice_declares.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_math.h b/darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_math.h
similarity index 97%
rename from darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_math.h
rename to darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_math.h
index 332e616..acb26ad 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_math.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_math.h
@@ -195,8 +195,8 @@
 __DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
 __DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
 __DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
-__DEVICE__ double nearbyint(double __a) { return __nv_nearbyint(__a); }
-__DEVICE__ float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
+__DEVICE__ double nearbyint(double __a) { return __builtin_nearbyint(__a); }
+__DEVICE__ float nearbyintf(float __a) { return __builtin_nearbyintf(__a); }
 __DEVICE__ double nextafter(double __a, double __b) {
   return __nv_nextafter(__a, __b);
 }
@@ -249,8 +249,9 @@
 __DEVICE__ float rhypotf(float __a, float __b) {
   return __nv_rhypotf(__a, __b);
 }
-__DEVICE__ double rint(double __a) { return __nv_rint(__a); }
-__DEVICE__ float rintf(float __a) { return __nv_rintf(__a); }
+// __nv_rint* in libdevice is buggy and produces incorrect results.
+__DEVICE__ double rint(double __a) { return __builtin_rint(__a); }
+__DEVICE__ float rintf(float __a) { return __builtin_rintf(__a); }
 __DEVICE__ double rnorm(int __a, const double *__b) {
   return __nv_rnorm(__a, __b);
 }
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_math_forward_declares.h b/darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_math_forward_declares.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_math_forward_declares.h
rename to darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_math_forward_declares.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_runtime_wrapper.h b/darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_runtime_wrapper.h
similarity index 97%
rename from darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_runtime_wrapper.h
rename to darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_runtime_wrapper.h
index f43ed55..f88c39a 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_runtime_wrapper.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/__clang_cuda_runtime_wrapper.h
@@ -377,30 +377,38 @@
 // Out-of-line implementations from __clang_cuda_builtin_vars.h.  These need to
 // come after we've pulled in the definition of uint3 and dim3.
 
+__device__ inline __cuda_builtin_threadIdx_t::operator dim3() const {
+  return dim3(x, y, z);
+}
+
 __device__ inline __cuda_builtin_threadIdx_t::operator uint3() const {
-  uint3 ret;
-  ret.x = x;
-  ret.y = y;
-  ret.z = z;
-  return ret;
+  return {x, y, z};
+}
+
+__device__ inline __cuda_builtin_blockIdx_t::operator dim3() const {
+  return dim3(x, y, z);
 }
 
 __device__ inline __cuda_builtin_blockIdx_t::operator uint3() const {
-  uint3 ret;
-  ret.x = x;
-  ret.y = y;
-  ret.z = z;
-  return ret;
+  return {x, y, z};
 }
 
 __device__ inline __cuda_builtin_blockDim_t::operator dim3() const {
   return dim3(x, y, z);
 }
 
+__device__ inline __cuda_builtin_blockDim_t::operator uint3() const {
+  return {x, y, z};
+}
+
 __device__ inline __cuda_builtin_gridDim_t::operator dim3() const {
   return dim3(x, y, z);
 }
 
+__device__ inline __cuda_builtin_gridDim_t::operator uint3() const {
+  return {x, y, z};
+}
+
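+// With the conversions above, device code can use the builtin variables where
+// a uint3 or dim3 is expected, e.g. (illustrative):
+//   uint3 __t = threadIdx; dim3 __g = gridDim;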
 #include <__clang_cuda_cmath.h>
 #include <__clang_cuda_intrinsics.h>
 #include <__clang_cuda_complex_builtins.h>
diff --git a/darwin-x86/lib64/clang/12.0.1/include/__clang_hip_cmath.h b/darwin-x86/lib64/clang/12.0.1/include/__clang_hip_cmath.h
new file mode 100644
index 0000000..fea799e
--- /dev/null
+++ b/darwin-x86/lib64/clang/12.0.1/include/__clang_hip_cmath.h
@@ -0,0 +1,521 @@
+/*===---- __clang_hip_cmath.h - HIP cmath decls -----------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_HIP_CMATH_H__
+#define __CLANG_HIP_CMATH_H__
+
+#if !defined(__HIP__)
+#error "This file is for HIP and OpenMP AMDGCN device compilation only."
+#endif
+
+#if defined(__cplusplus)
+#include <limits>
+#endif
+#include <limits.h>
+#include <stdint.h>
+
+#pragma push_macro("__DEVICE__")
+#define __DEVICE__ static __device__ inline __attribute__((always_inline))
+
+// Start with functions that cannot be defined by DEF macros below.
+#if defined(__cplusplus)
+__DEVICE__ double abs(double __x) { return ::fabs(__x); }
+__DEVICE__ float abs(float __x) { return ::fabsf(__x); }
+__DEVICE__ long long abs(long long __n) { return ::llabs(__n); }
+__DEVICE__ long abs(long __n) { return ::labs(__n); }
+__DEVICE__ float fma(float __x, float __y, float __z) {
+  return ::fmaf(__x, __y, __z);
+}
+__DEVICE__ int fpclassify(float __x) {
+  return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
+                              FP_ZERO, __x);
+}
+__DEVICE__ int fpclassify(double __x) {
+  return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
+                              FP_ZERO, __x);
+}
+__DEVICE__ float frexp(float __arg, int *__exp) {
+  return ::frexpf(__arg, __exp);
+}
+__DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
+__DEVICE__ bool isfinite(double __x) { return ::__finite(__x); }
+__DEVICE__ bool isgreater(float __x, float __y) {
+  return __builtin_isgreater(__x, __y);
+}
+__DEVICE__ bool isgreater(double __x, double __y) {
+  return __builtin_isgreater(__x, __y);
+}
+__DEVICE__ bool isgreaterequal(float __x, float __y) {
+  return __builtin_isgreaterequal(__x, __y);
+}
+__DEVICE__ bool isgreaterequal(double __x, double __y) {
+  return __builtin_isgreaterequal(__x, __y);
+}
+__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
+__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
+__DEVICE__ bool isless(float __x, float __y) {
+  return __builtin_isless(__x, __y);
+}
+__DEVICE__ bool isless(double __x, double __y) {
+  return __builtin_isless(__x, __y);
+}
+__DEVICE__ bool islessequal(float __x, float __y) {
+  return __builtin_islessequal(__x, __y);
+}
+__DEVICE__ bool islessequal(double __x, double __y) {
+  return __builtin_islessequal(__x, __y);
+}
+__DEVICE__ bool islessgreater(float __x, float __y) {
+  return __builtin_islessgreater(__x, __y);
+}
+__DEVICE__ bool islessgreater(double __x, double __y) {
+  return __builtin_islessgreater(__x, __y);
+}
+__DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); }
+__DEVICE__ bool isnan(double __x) { return ::__isnan(__x); }
+__DEVICE__ bool isnormal(float __x) { return __builtin_isnormal(__x); }
+__DEVICE__ bool isnormal(double __x) { return __builtin_isnormal(__x); }
+__DEVICE__ bool isunordered(float __x, float __y) {
+  return __builtin_isunordered(__x, __y);
+}
+__DEVICE__ bool isunordered(double __x, double __y) {
+  return __builtin_isunordered(__x, __y);
+}
+__DEVICE__ float modf(float __x, float *__iptr) { return ::modff(__x, __iptr); }
+__DEVICE__ float pow(float __base, int __iexp) {
+  return ::powif(__base, __iexp);
+}
+__DEVICE__ double pow(double __base, int __iexp) {
+  return ::powi(__base, __iexp);
+}
+__DEVICE__ float remquo(float __x, float __y, int *__quo) {
+  return ::remquof(__x, __y, __quo);
+}
+__DEVICE__ float scalbln(float __x, long int __n) {
+  return ::scalblnf(__x, __n);
+}
+__DEVICE__ bool signbit(float __x) { return ::__signbitf(__x); }
+__DEVICE__ bool signbit(double __x) { return ::__signbit(__x); }
+
+// Notably missing above is nexttoward.  We omit it because
+// ocml doesn't provide an implementation, and we don't want to be in the
+// business of implementing tricky libm functions in this header.
+
+// Other functions.
+__DEVICE__ _Float16 fma(_Float16 __x, _Float16 __y, _Float16 __z) {
+  return __ocml_fma_f16(__x, __y, __z);
+}
+__DEVICE__ _Float16 pow(_Float16 __base, int __iexp) {
+  return __ocml_pown_f16(__base, __iexp);
+}
+
+// BEGIN DEF_FUN and HIP_OVERLOAD
+
+// BEGIN DEF_FUN
+
+#pragma push_macro("__DEF_FUN1")
+#pragma push_macro("__DEF_FUN2")
+#pragma push_macro("__DEF_FUN2_FI")
+
+// Define cmath functions that take a float argument and return __retty.
+#define __DEF_FUN1(__retty, __func)                                            \
+  __DEVICE__                                                                   \
+  __retty __func(float __x) { return __func##f(__x); }
+
+// Define cmath functions that take two float arguments and return __retty.
+#define __DEF_FUN2(__retty, __func)                                            \
+  __DEVICE__                                                                   \
+  __retty __func(float __x, float __y) { return __func##f(__x, __y); }
+
+// Define cmath functions that take a float and an int argument and return
+// __retty.
+#define __DEF_FUN2_FI(__retty, __func)                                         \
+  __DEVICE__                                                                   \
+  __retty __func(float __x, int __y) { return __func##f(__x, __y); }
+
+__DEF_FUN1(float, acos)
+__DEF_FUN1(float, acosh)
+__DEF_FUN1(float, asin)
+__DEF_FUN1(float, asinh)
+__DEF_FUN1(float, atan)
+__DEF_FUN2(float, atan2)
+__DEF_FUN1(float, atanh)
+__DEF_FUN1(float, cbrt)
+__DEF_FUN1(float, ceil)
+__DEF_FUN2(float, copysign)
+__DEF_FUN1(float, cos)
+__DEF_FUN1(float, cosh)
+__DEF_FUN1(float, erf)
+__DEF_FUN1(float, erfc)
+__DEF_FUN1(float, exp)
+__DEF_FUN1(float, exp2)
+__DEF_FUN1(float, expm1)
+__DEF_FUN1(float, fabs)
+__DEF_FUN2(float, fdim)
+__DEF_FUN1(float, floor)
+__DEF_FUN2(float, fmax)
+__DEF_FUN2(float, fmin)
+__DEF_FUN2(float, fmod)
+__DEF_FUN2(float, hypot)
+__DEF_FUN1(int, ilogb)
+__DEF_FUN2_FI(float, ldexp)
+__DEF_FUN1(float, lgamma)
+__DEF_FUN1(float, log)
+__DEF_FUN1(float, log10)
+__DEF_FUN1(float, log1p)
+__DEF_FUN1(float, log2)
+__DEF_FUN1(float, logb)
+__DEF_FUN1(long long, llrint)
+__DEF_FUN1(long long, llround)
+__DEF_FUN1(long, lrint)
+__DEF_FUN1(long, lround)
+__DEF_FUN1(float, nearbyint)
+__DEF_FUN2(float, nextafter)
+__DEF_FUN2(float, pow)
+__DEF_FUN2(float, remainder)
+__DEF_FUN1(float, rint)
+__DEF_FUN1(float, round)
+__DEF_FUN2_FI(float, scalbn)
+__DEF_FUN1(float, sin)
+__DEF_FUN1(float, sinh)
+__DEF_FUN1(float, sqrt)
+__DEF_FUN1(float, tan)
+__DEF_FUN1(float, tanh)
+__DEF_FUN1(float, tgamma)
+__DEF_FUN1(float, trunc)
+
+#pragma pop_macro("__DEF_FUN1")
+#pragma pop_macro("__DEF_FUN2")
+#pragma pop_macro("__DEF_FUN2_FI")
+
+// END DEF_FUN
+
+// BEGIN HIP_OVERLOAD
+
+#pragma push_macro("__HIP_OVERLOAD1")
+#pragma push_macro("__HIP_OVERLOAD2")
+
+// __hip_enable_if::type is a type function that yields __T when __B is true.
+template <bool __B, class __T = void> struct __hip_enable_if {};
+
+template <class __T> struct __hip_enable_if<true, __T> { typedef __T type; };
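+// For example (illustrative): __hip_enable_if<true, int>::type is int, while
+// __hip_enable_if<false, int>::type does not exist, so an overload that names
+// it is silently dropped from overload resolution (SFINAE) instead of
+// triggering an error.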
+
+// __HIP_OVERLOAD1 is used to resolve function calls with an integer argument
+// to avoid a compilation error due to ambiguity, e.g. floor(5) is resolved to
+// floor(double).
+#define __HIP_OVERLOAD1(__retty, __fn)                                         \
+  template <typename __T>                                                      \
+  __DEVICE__ typename __hip_enable_if<std::numeric_limits<__T>::is_integer,    \
+                                      __retty>::type                           \
+  __fn(__T __x) {                                                              \
+    return ::__fn((double)__x);                                                \
+  }
+
+// __HIP_OVERLOAD2 is used to resolve function calls with mixed float/double
+// or integer arguments to avoid a compilation error due to ambiguity, e.g.
+// max(5.0f, 6.0) is resolved to max(double, double).
+#define __HIP_OVERLOAD2(__retty, __fn)                                         \
+  template <typename __T1, typename __T2>                                      \
+  __DEVICE__                                                                   \
+      typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&    \
+                                   std::numeric_limits<__T2>::is_specialized,  \
+                               __retty>::type                                  \
+      __fn(__T1 __x, __T2 __y) {                                               \
+    return __fn((double)__x, (double)__y);                                     \
+  }
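+// Illustrative example (hypothetical device code, not part of this header):
+//   double __a = floor(5);         // __HIP_OVERLOAD1 casts: ::floor(5.0)
+//   double __b = fmax(5.0f, 6.0);  // __HIP_OVERLOAD2 casts: fmax(5.0, 6.0)
+// Both yield double instead of an ambiguous-overload error, because the
+// template is an exact match while the float/double overloads each need a
+// conversion.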
+
+__HIP_OVERLOAD1(double, abs)
+__HIP_OVERLOAD1(double, acos)
+__HIP_OVERLOAD1(double, acosh)
+__HIP_OVERLOAD1(double, asin)
+__HIP_OVERLOAD1(double, asinh)
+__HIP_OVERLOAD1(double, atan)
+__HIP_OVERLOAD2(double, atan2)
+__HIP_OVERLOAD1(double, atanh)
+__HIP_OVERLOAD1(double, cbrt)
+__HIP_OVERLOAD1(double, ceil)
+__HIP_OVERLOAD2(double, copysign)
+__HIP_OVERLOAD1(double, cos)
+__HIP_OVERLOAD1(double, cosh)
+__HIP_OVERLOAD1(double, erf)
+__HIP_OVERLOAD1(double, erfc)
+__HIP_OVERLOAD1(double, exp)
+__HIP_OVERLOAD1(double, exp2)
+__HIP_OVERLOAD1(double, expm1)
+__HIP_OVERLOAD1(double, fabs)
+__HIP_OVERLOAD2(double, fdim)
+__HIP_OVERLOAD1(double, floor)
+__HIP_OVERLOAD2(double, fmax)
+__HIP_OVERLOAD2(double, fmin)
+__HIP_OVERLOAD2(double, fmod)
+__HIP_OVERLOAD1(int, fpclassify)
+__HIP_OVERLOAD2(double, hypot)
+__HIP_OVERLOAD1(int, ilogb)
+__HIP_OVERLOAD1(bool, isfinite)
+__HIP_OVERLOAD2(bool, isgreater)
+__HIP_OVERLOAD2(bool, isgreaterequal)
+__HIP_OVERLOAD1(bool, isinf)
+__HIP_OVERLOAD2(bool, isless)
+__HIP_OVERLOAD2(bool, islessequal)
+__HIP_OVERLOAD2(bool, islessgreater)
+__HIP_OVERLOAD1(bool, isnan)
+__HIP_OVERLOAD1(bool, isnormal)
+__HIP_OVERLOAD2(bool, isunordered)
+__HIP_OVERLOAD1(double, lgamma)
+__HIP_OVERLOAD1(double, log)
+__HIP_OVERLOAD1(double, log10)
+__HIP_OVERLOAD1(double, log1p)
+__HIP_OVERLOAD1(double, log2)
+__HIP_OVERLOAD1(double, logb)
+__HIP_OVERLOAD1(long long, llrint)
+__HIP_OVERLOAD1(long long, llround)
+__HIP_OVERLOAD1(long, lrint)
+__HIP_OVERLOAD1(long, lround)
+__HIP_OVERLOAD1(double, nearbyint)
+__HIP_OVERLOAD2(double, nextafter)
+__HIP_OVERLOAD2(double, pow)
+__HIP_OVERLOAD2(double, remainder)
+__HIP_OVERLOAD1(double, rint)
+__HIP_OVERLOAD1(double, round)
+__HIP_OVERLOAD1(bool, signbit)
+__HIP_OVERLOAD1(double, sin)
+__HIP_OVERLOAD1(double, sinh)
+__HIP_OVERLOAD1(double, sqrt)
+__HIP_OVERLOAD1(double, tan)
+__HIP_OVERLOAD1(double, tanh)
+__HIP_OVERLOAD1(double, tgamma)
+__HIP_OVERLOAD1(double, trunc)
+
+// Overload these but don't add them to std; they are not part of cmath.
+__HIP_OVERLOAD2(double, max)
+__HIP_OVERLOAD2(double, min)
+
+// Additional overloads that don't quite match HIP_OVERLOAD.
+template <typename __T1, typename __T2, typename __T3>
+__DEVICE__
+    typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&
+                                 std::numeric_limits<__T2>::is_specialized &&
+                                 std::numeric_limits<__T3>::is_specialized,
+                             double>::type
+    fma(__T1 __x, __T2 __y, __T3 __z) {
+  return ::fma((double)__x, (double)__y, (double)__z);
+}
+
+template <typename __T>
+__DEVICE__
+    typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+    frexp(__T __x, int *__exp) {
+  return ::frexp((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__
+    typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+    ldexp(__T __x, int __exp) {
+  return ::ldexp((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__
+    typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+    modf(__T __x, double *__exp) {
+  return ::modf((double)__x, __exp);
+}
+
+template <typename __T1, typename __T2>
+__DEVICE__
+    typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&
+                                 std::numeric_limits<__T2>::is_specialized,
+                             double>::type
+    remquo(__T1 __x, __T2 __y, int *__quo) {
+  return ::remquo((double)__x, (double)__y, __quo);
+}
+
+template <typename __T>
+__DEVICE__
+    typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+    scalbln(__T __x, long int __exp) {
+  return ::scalbln((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__
+    typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+    scalbn(__T __x, int __exp) {
+  return ::scalbn((double)__x, __exp);
+}
+
+#pragma pop_macro("__HIP_OVERLOAD1")
+#pragma pop_macro("__HIP_OVERLOAD2")
+
+// END HIP_OVERLOAD
+
+// END DEF_FUN and HIP_OVERLOAD
+
+#endif // defined(__cplusplus)
+
+// Define these overloads inside the namespace our standard library uses.
+#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
+_LIBCPP_BEGIN_NAMESPACE_STD
+#else
+namespace std {
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+#endif
+#endif
+
+// Pull the new overloads we defined above into namespace std.
+// using ::abs; - This may be considered for C++.
+using ::acos;
+using ::acosh;
+using ::asin;
+using ::asinh;
+using ::atan;
+using ::atan2;
+using ::atanh;
+using ::cbrt;
+using ::ceil;
+using ::copysign;
+using ::cos;
+using ::cosh;
+using ::erf;
+using ::erfc;
+using ::exp;
+using ::exp2;
+using ::expm1;
+using ::fabs;
+using ::fdim;
+using ::floor;
+using ::fma;
+using ::fmax;
+using ::fmin;
+using ::fmod;
+using ::fpclassify;
+using ::frexp;
+using ::hypot;
+using ::ilogb;
+using ::isfinite;
+using ::isgreater;
+using ::isgreaterequal;
+using ::isless;
+using ::islessequal;
+using ::islessgreater;
+using ::isnormal;
+using ::isunordered;
+using ::ldexp;
+using ::lgamma;
+using ::llrint;
+using ::llround;
+using ::log;
+using ::log10;
+using ::log1p;
+using ::log2;
+using ::logb;
+using ::lrint;
+using ::lround;
+using ::modf;
+// using ::nan; - This may be considered for C++.
+// using ::nanf; - This may be considered for C++.
+// using ::nanl; - This is not yet defined.
+using ::nearbyint;
+using ::nextafter;
+// using ::nexttoward; - Omit this since we do not have a definition.
+using ::pow;
+using ::remainder;
+using ::remquo;
+using ::rint;
+using ::round;
+using ::scalbln;
+using ::scalbn;
+using ::signbit;
+using ::sin;
+using ::sinh;
+using ::sqrt;
+using ::tan;
+using ::tanh;
+using ::tgamma;
+using ::trunc;
+
+// Well this is fun: We need to pull these symbols in for libc++, but we can't
+// pull them in with libstdc++, because its ::isinf and ::isnan are different
+// from its std::isinf and std::isnan.
+#ifndef __GLIBCXX__
+using ::isinf;
+using ::isnan;
+#endif
+
+// Finally, pull the "foobarf" functions that HIP defines into std.
+using ::acosf;
+using ::acoshf;
+using ::asinf;
+using ::asinhf;
+using ::atan2f;
+using ::atanf;
+using ::atanhf;
+using ::cbrtf;
+using ::ceilf;
+using ::copysignf;
+using ::cosf;
+using ::coshf;
+using ::erfcf;
+using ::erff;
+using ::exp2f;
+using ::expf;
+using ::expm1f;
+using ::fabsf;
+using ::fdimf;
+using ::floorf;
+using ::fmaf;
+using ::fmaxf;
+using ::fminf;
+using ::fmodf;
+using ::frexpf;
+using ::hypotf;
+using ::ilogbf;
+using ::ldexpf;
+using ::lgammaf;
+using ::llrintf;
+using ::llroundf;
+using ::log10f;
+using ::log1pf;
+using ::log2f;
+using ::logbf;
+using ::logf;
+using ::lrintf;
+using ::lroundf;
+using ::modff;
+using ::nearbyintf;
+using ::nextafterf;
+// using ::nexttowardf; - Omit this since we do not have a definition.
+using ::powf;
+using ::remainderf;
+using ::remquof;
+using ::rintf;
+using ::roundf;
+using ::scalblnf;
+using ::scalbnf;
+using ::sinf;
+using ::sinhf;
+using ::sqrtf;
+using ::tanf;
+using ::tanhf;
+using ::tgammaf;
+using ::truncf;
+
+#ifdef _LIBCPP_END_NAMESPACE_STD
+_LIBCPP_END_NAMESPACE_STD
+#else
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_END_NAMESPACE_VERSION
+#endif
+} // namespace std
+#endif
+
+#pragma pop_macro("__DEVICE__")
+
+#endif // __CLANG_HIP_CMATH_H__
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_libdevice_declares.h b/darwin-x86/lib64/clang/12.0.1/include/__clang_hip_libdevice_declares.h
similarity index 97%
rename from darwin-x86/lib64/clang/11.0.5/include/__clang_hip_libdevice_declares.h
rename to darwin-x86/lib64/clang/12.0.1/include/__clang_hip_libdevice_declares.h
index e1cd49a..ac98907 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_libdevice_declares.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/__clang_hip_libdevice_declares.h
@@ -10,7 +10,9 @@
 #ifndef __CLANG_HIP_LIBDEVICE_DECLARES_H__
 #define __CLANG_HIP_LIBDEVICE_DECLARES_H__
 
+#ifdef __cplusplus
 extern "C" {
+#endif
 
 // BEGIN FLOAT
 __device__ __attribute__((const)) float __ocml_acos_f32(float);
@@ -78,6 +80,7 @@
 __device__ __attribute__((pure)) float __ocml_ncdf_f32(float);
 __device__ __attribute__((pure)) float __ocml_ncdfinv_f32(float);
 __device__ __attribute__((pure)) float __ocml_pow_f32(float, float);
+__device__ __attribute__((pure)) float __ocml_pown_f32(float, int);
 __device__ __attribute__((pure)) float __ocml_rcbrt_f32(float);
 __device__ __attribute__((const)) float __ocml_remainder_f32(float, float);
 __device__ float __ocml_remquo_f32(float, float,
@@ -126,10 +129,10 @@
 __device__ __attribute__((const)) float __ocml_div_rtn_f32(float, float);
 __device__ __attribute__((const)) float __ocml_div_rtp_f32(float, float);
 __device__ __attribute__((const)) float __ocml_div_rtz_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sqrt_rte_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sqrt_rtn_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sqrt_rtp_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sqrt_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sqrt_rte_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtn_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtp_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtz_f32(float);
 __device__ __attribute__((const)) float __ocml_fma_rte_f32(float, float, float);
 __device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float);
 __device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float);
@@ -205,6 +208,7 @@
 __device__ __attribute__((pure)) double __ocml_ncdf_f64(double);
 __device__ __attribute__((pure)) double __ocml_ncdfinv_f64(double);
 __device__ __attribute__((pure)) double __ocml_pow_f64(double, double);
+__device__ __attribute__((pure)) double __ocml_pown_f64(double, int);
 __device__ __attribute__((pure)) double __ocml_rcbrt_f64(double);
 __device__ __attribute__((const)) double __ocml_remainder_f64(double, double);
 __device__ double __ocml_remquo_f64(double, double,
@@ -252,10 +256,10 @@
 __device__ __attribute__((const)) double __ocml_div_rtn_f64(double, double);
 __device__ __attribute__((const)) double __ocml_div_rtp_f64(double, double);
 __device__ __attribute__((const)) double __ocml_div_rtz_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sqrt_rte_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sqrt_rtn_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sqrt_rtp_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sqrt_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sqrt_rte_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtn_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtp_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtz_f64(double);
 __device__ __attribute__((const)) double __ocml_fma_rte_f64(double, double,
                                                             double);
 __device__ __attribute__((const)) double __ocml_fma_rtn_f64(double, double,
@@ -290,6 +294,7 @@
 __device__ _Float16 __ocml_sin_f16(_Float16);
 __device__ __attribute__((const)) _Float16 __ocml_sqrt_f16(_Float16);
 __device__ __attribute__((const)) _Float16 __ocml_trunc_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_pown_f16(_Float16, int);
 
 typedef _Float16 __2f16 __attribute__((ext_vector_type(2)));
 typedef short __2i16 __attribute__((ext_vector_type(2)));
@@ -313,14 +318,17 @@
 __device__ inline __2f16
 __llvm_amdgcn_rcp_2f16(__2f16 __x) // Not currently exposed by ROCDL.
 {
-  return __2f16{__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y)};
+  return (__2f16)(__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y));
 }
 __device__ __attribute__((const)) __2f16 __ocml_rint_2f16(__2f16);
 __device__ __attribute__((const)) __2f16 __ocml_rsqrt_2f16(__2f16);
 __device__ __2f16 __ocml_sin_2f16(__2f16);
 __device__ __attribute__((const)) __2f16 __ocml_sqrt_2f16(__2f16);
 __device__ __attribute__((const)) __2f16 __ocml_trunc_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_pown_2f16(__2f16, __2i16);
 
+#ifdef __cplusplus
 } // extern "C"
+#endif
 
 #endif // __CLANG_HIP_LIBDEVICE_DECLARES_H__
diff --git a/darwin-x86/lib64/clang/12.0.1/include/__clang_hip_math.h b/darwin-x86/lib64/clang/12.0.1/include/__clang_hip_math.h
new file mode 100644
index 0000000..f2365e8
--- /dev/null
+++ b/darwin-x86/lib64/clang/12.0.1/include/__clang_hip_math.h
@@ -0,0 +1,1246 @@
+/*===---- __clang_hip_math.h - Device-side HIP math support ----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG_HIP_MATH_H__
+#define __CLANG_HIP_MATH_H__
+
+#if !defined(__HIP__)
+#error "This file is for HIP and OpenMP AMDGCN device compilation only."
+#endif
+
+#if defined(__cplusplus)
+#include <algorithm>
+#endif
+#include <limits.h>
+#include <stdint.h>
+
+#pragma push_macro("__DEVICE__")
+#define __DEVICE__ static __device__ inline __attribute__((always_inline))
+
+// A few functions return bool in C++ and int in C.
+#pragma push_macro("__RETURN_TYPE")
+#if defined(__cplusplus)
+#define __RETURN_TYPE bool
+#else
+#define __RETURN_TYPE int
+#endif
+
+#if defined(__cplusplus) && __cplusplus < 201103L
+// Emulate static_assert on type sizes.
+template <bool>
+struct __compare_result {};
+template <>
+struct __compare_result<true> {
+  static const bool valid;
+};
+
+__DEVICE__
+void __suppress_unused_warning(bool b) {}
+template <unsigned int S, unsigned int T>
+__DEVICE__ void __static_assert_equal_size() {
+  __suppress_unused_warning(__compare_result<S == T>::valid);
+}
+
+#define __static_assert_type_size_equal(A, B) \
+  __static_assert_equal_size<A,B>()
+
+#else
+#define __static_assert_type_size_equal(A,B) \
+  static_assert((A) == (B), "")
+
+#endif
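+// Example (illustrative): inside a function,
+//   __static_assert_type_size_equal(sizeof(float), sizeof(uint32_t));
+// compiles, while unequal sizes fail: before C++11 because
+// __compare_result<false> has no ::valid member, and otherwise via
+// static_assert.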
+
+__DEVICE__
+uint64_t __make_mantissa_base8(const char *__tagp) {
+  uint64_t __r = 0;
+  while (*__tagp != '\0') {
+    char __tmp = *__tagp;
+
+    if (__tmp >= '0' && __tmp <= '7')
+      __r = (__r * 8u) + __tmp - '0';
+    else
+      return 0;
+
+    ++__tagp;
+  }
+
+  return __r;
+}
+
+__DEVICE__
+uint64_t __make_mantissa_base10(const char *__tagp) {
+  uint64_t __r = 0;
+  while (*__tagp != '\0') {
+    char __tmp = *__tagp;
+
+    if (__tmp >= '0' && __tmp <= '9')
+      __r = (__r * 10u) + __tmp - '0';
+    else
+      return 0;
+
+    ++__tagp;
+  }
+
+  return __r;
+}
+
+__DEVICE__
+uint64_t __make_mantissa_base16(const char *__tagp) {
+  uint64_t __r = 0;
+  while (*__tagp != '\0') {
+    char __tmp = *__tagp;
+
+    if (__tmp >= '0' && __tmp <= '9')
+      __r = (__r * 16u) + __tmp - '0';
+    else if (__tmp >= 'a' && __tmp <= 'f')
+      __r = (__r * 16u) + __tmp - 'a' + 10;
+    else if (__tmp >= 'A' && __tmp <= 'F')
+      __r = (__r * 16u) + __tmp - 'A' + 10;
+    else
+      return 0;
+
+    ++__tagp;
+  }
+
+  return __r;
+}
+
+__DEVICE__
+uint64_t __make_mantissa(const char *__tagp) {
+  if (!__tagp)
+    return 0u;
+
+  if (*__tagp == '0') {
+    ++__tagp;
+
+    if (*__tagp == 'x' || *__tagp == 'X')
+      return __make_mantissa_base16(__tagp + 1); // Skip the 'x'/'X' prefix.
+    else
+      return __make_mantissa_base8(__tagp);
+  }
+
+  return __make_mantissa_base10(__tagp);
+}
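+// Examples (illustrative): __make_mantissa("0x1f") == 31 (hex),
+// __make_mantissa("017") == 15 (octal), and __make_mantissa("17") == 17
+// (decimal); any tag containing an out-of-range digit yields 0.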
+
+// BEGIN FLOAT
+#if defined(__cplusplus)
+__DEVICE__
+int abs(int __x) {
+  int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
+  return (__x ^ __sgn) - __sgn;
+}
+__DEVICE__
+long labs(long __x) {
+  long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
+  return (__x ^ __sgn) - __sgn;
+}
+__DEVICE__
+long long llabs(long long __x) {
+  long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
+  return (__x ^ __sgn) - __sgn;
+}
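+// The branchless pattern above computes |x|: __sgn is 0 for non-negative
+// inputs and all ones for negative ones, so (__x ^ __sgn) - __sgn either
+// leaves __x unchanged or flips every bit and adds one (two's-complement
+// negation). Worked example (illustrative): for __x = -5, __sgn = -1,
+// (-5 ^ -1) = 4 and 4 - (-1) = 5.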
+#endif
+
+__DEVICE__
+float acosf(float __x) { return __ocml_acos_f32(__x); }
+
+__DEVICE__
+float acoshf(float __x) { return __ocml_acosh_f32(__x); }
+
+__DEVICE__
+float asinf(float __x) { return __ocml_asin_f32(__x); }
+
+__DEVICE__
+float asinhf(float __x) { return __ocml_asinh_f32(__x); }
+
+__DEVICE__
+float atan2f(float __x, float __y) { return __ocml_atan2_f32(__x, __y); }
+
+__DEVICE__
+float atanf(float __x) { return __ocml_atan_f32(__x); }
+
+__DEVICE__
+float atanhf(float __x) { return __ocml_atanh_f32(__x); }
+
+__DEVICE__
+float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
+
+__DEVICE__
+float ceilf(float __x) { return __ocml_ceil_f32(__x); }
+
+__DEVICE__
+float copysignf(float __x, float __y) { return __ocml_copysign_f32(__x, __y); }
+
+__DEVICE__
+float cosf(float __x) { return __ocml_cos_f32(__x); }
+
+__DEVICE__
+float coshf(float __x) { return __ocml_cosh_f32(__x); }
+
+__DEVICE__
+float cospif(float __x) { return __ocml_cospi_f32(__x); }
+
+__DEVICE__
+float cyl_bessel_i0f(float __x) { return __ocml_i0_f32(__x); }
+
+__DEVICE__
+float cyl_bessel_i1f(float __x) { return __ocml_i1_f32(__x); }
+
+__DEVICE__
+float erfcf(float __x) { return __ocml_erfc_f32(__x); }
+
+__DEVICE__
+float erfcinvf(float __x) { return __ocml_erfcinv_f32(__x); }
+
+__DEVICE__
+float erfcxf(float __x) { return __ocml_erfcx_f32(__x); }
+
+__DEVICE__
+float erff(float __x) { return __ocml_erf_f32(__x); }
+
+__DEVICE__
+float erfinvf(float __x) { return __ocml_erfinv_f32(__x); }
+
+__DEVICE__
+float exp10f(float __x) { return __ocml_exp10_f32(__x); }
+
+__DEVICE__
+float exp2f(float __x) { return __ocml_exp2_f32(__x); }
+
+__DEVICE__
+float expf(float __x) { return __ocml_exp_f32(__x); }
+
+__DEVICE__
+float expm1f(float __x) { return __ocml_expm1_f32(__x); }
+
+__DEVICE__
+float fabsf(float __x) { return __ocml_fabs_f32(__x); }
+
+__DEVICE__
+float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); }
+
+__DEVICE__
+float fdividef(float __x, float __y) { return __x / __y; }
+
+__DEVICE__
+float floorf(float __x) { return __ocml_floor_f32(__x); }
+
+__DEVICE__
+float fmaf(float __x, float __y, float __z) {
+  return __ocml_fma_f32(__x, __y, __z);
+}
+
+__DEVICE__
+float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
+
+__DEVICE__
+float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
+
+__DEVICE__
+float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
+
+__DEVICE__
+float frexpf(float __x, int *__nptr) {
+  int __tmp;
+  float __r =
+      __ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp);
+  *__nptr = __tmp;
+
+  return __r;
+}
+
+__DEVICE__
+float hypotf(float __x, float __y) { return __ocml_hypot_f32(__x, __y); }
+
+__DEVICE__
+int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
+
+__DEVICE__
+__RETURN_TYPE __finitef(float __x) { return __ocml_isfinite_f32(__x); }
+
+__DEVICE__
+__RETURN_TYPE __isinff(float __x) { return __ocml_isinf_f32(__x); }
+
+__DEVICE__
+__RETURN_TYPE __isnanf(float __x) { return __ocml_isnan_f32(__x); }
+
+__DEVICE__
+float j0f(float __x) { return __ocml_j0_f32(__x); }
+
+__DEVICE__
+float j1f(float __x) { return __ocml_j1_f32(__x); }
+
+__DEVICE__
+float jnf(int __n, float __x) {
+  // TODO: we could use Ahmes multiplication and the Miller & Brown algorithm
+  // for linear recurrences to get O(log n) steps, but it's unclear if it'd
+  // be beneficial in this case.
+  if (__n == 0)
+    return j0f(__x);
+  if (__n == 1)
+    return j1f(__x);
+
+  float __x0 = j0f(__x);
+  float __x1 = j1f(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    float __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+
+  return __x1;
+}
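+// The loop above applies the upward Bessel recurrence (for reference):
+//   J_{n+1}(x) = (2n / x) * J_n(x) - J_{n-1}(x),
+// seeded with J_0 and J_1; ynf below uses the same recurrence for Y_n.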
+
+__DEVICE__
+float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
+
+__DEVICE__
+float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
+
+__DEVICE__
+long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
+
+__DEVICE__
+long long int llroundf(float __x) { return __ocml_round_f32(__x); }
+
+__DEVICE__
+float log10f(float __x) { return __ocml_log10_f32(__x); }
+
+__DEVICE__
+float log1pf(float __x) { return __ocml_log1p_f32(__x); }
+
+__DEVICE__
+float log2f(float __x) { return __ocml_log2_f32(__x); }
+
+__DEVICE__
+float logbf(float __x) { return __ocml_logb_f32(__x); }
+
+__DEVICE__
+float logf(float __x) { return __ocml_log_f32(__x); }
+
+__DEVICE__
+long int lrintf(float __x) { return __ocml_rint_f32(__x); }
+
+__DEVICE__
+long int lroundf(float __x) { return __ocml_round_f32(__x); }
+
+__DEVICE__
+float modff(float __x, float *__iptr) {
+  float __tmp;
+  float __r =
+      __ocml_modf_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
+  *__iptr = __tmp;
+  return __r;
+}
+
+__DEVICE__
+float nanf(const char *__tagp) {
+  union {
+    float val;
+    struct ieee_float {
+      unsigned int mantissa : 22;
+      unsigned int quiet : 1;
+      unsigned int exponent : 8;
+      unsigned int sign : 1;
+    } bits;
+  } __tmp;
+  __static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits));
+
+  __tmp.bits.sign = 0u;
+  __tmp.bits.exponent = ~0u;
+  __tmp.bits.quiet = 1u;
+  __tmp.bits.mantissa = __make_mantissa(__tagp);
+
+  return __tmp.val;
+}
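+// Example (illustrative): nanf("0x7") returns a quiet NaN whose mantissa
+// payload is 7, and nanf("") returns the default quiet NaN (payload 0).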
+
+__DEVICE__
+float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
+
+__DEVICE__
+float nextafterf(float __x, float __y) {
+  return __ocml_nextafter_f32(__x, __y);
+}
+
+__DEVICE__
+float norm3df(float __x, float __y, float __z) {
+  return __ocml_len3_f32(__x, __y, __z);
+}
+
+__DEVICE__
+float norm4df(float __x, float __y, float __z, float __w) {
+  return __ocml_len4_f32(__x, __y, __z, __w);
+}
+
+__DEVICE__
+float normcdff(float __x) { return __ocml_ncdf_f32(__x); }
+
+__DEVICE__
+float normcdfinvf(float __x) { return __ocml_ncdfinv_f32(__x); }
+
+__DEVICE__
+float normf(int __dim,
+            const float *__a) { // TODO: placeholder until OCML adds support.
+  float __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_sqrt_f32(__r);
+}
+
+__DEVICE__
+float powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+
+__DEVICE__
+float powif(float __x, int __y) { return __ocml_pown_f32(__x, __y); }
+
+__DEVICE__
+float rcbrtf(float __x) { return __ocml_rcbrt_f32(__x); }
+
+__DEVICE__
+float remainderf(float __x, float __y) {
+  return __ocml_remainder_f32(__x, __y);
+}
+
+__DEVICE__
+float remquof(float __x, float __y, int *__quo) {
+  int __tmp;
+  float __r = __ocml_remquo_f32(
+      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
+  *__quo = __tmp;
+
+  return __r;
+}
+
+__DEVICE__
+float rhypotf(float __x, float __y) { return __ocml_rhypot_f32(__x, __y); }
+
+__DEVICE__
+float rintf(float __x) { return __ocml_rint_f32(__x); }
+
+__DEVICE__
+float rnorm3df(float __x, float __y, float __z) {
+  return __ocml_rlen3_f32(__x, __y, __z);
+}
+
+__DEVICE__
+float rnorm4df(float __x, float __y, float __z, float __w) {
+  return __ocml_rlen4_f32(__x, __y, __z, __w);
+}
+
+__DEVICE__
+float rnormf(int __dim,
+             const float *__a) { // TODO: placeholder until OCML adds support.
+  float __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_rsqrt_f32(__r);
+}
+
+__DEVICE__
+float roundf(float __x) { return __ocml_round_f32(__x); }
+
+__DEVICE__
+float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
+
+__DEVICE__
+float scalblnf(float __x, long int __n) {
+  return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n)
+                         : __ocml_scalb_f32(__x, __n);
+}
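+// Note on the branch above: when __n does not fit in an int, the call falls
+// back to __ocml_scalb_f32 instead of the int-exponent __ocml_scalbn_f32.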
+
+__DEVICE__
+float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
+
+__DEVICE__
+__RETURN_TYPE __signbitf(float __x) { return __ocml_signbit_f32(__x); }
+
+__DEVICE__
+void sincosf(float __x, float *__sinptr, float *__cosptr) {
+  float __tmp;
+  *__sinptr =
+      __ocml_sincos_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
+  *__cosptr = __tmp;
+}
+
+__DEVICE__
+void sincospif(float __x, float *__sinptr, float *__cosptr) {
+  float __tmp;
+  *__sinptr = __ocml_sincospi_f32(
+      __x, (__attribute__((address_space(5))) float *)&__tmp);
+  *__cosptr = __tmp;
+}
+
+__DEVICE__
+float sinf(float __x) { return __ocml_sin_f32(__x); }
+
+__DEVICE__
+float sinhf(float __x) { return __ocml_sinh_f32(__x); }
+
+__DEVICE__
+float sinpif(float __x) { return __ocml_sinpi_f32(__x); }
+
+__DEVICE__
+float sqrtf(float __x) { return __ocml_sqrt_f32(__x); }
+
+__DEVICE__
+float tanf(float __x) { return __ocml_tan_f32(__x); }
+
+__DEVICE__
+float tanhf(float __x) { return __ocml_tanh_f32(__x); }
+
+__DEVICE__
+float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
+
+__DEVICE__
+float truncf(float __x) { return __ocml_trunc_f32(__x); }
+
+__DEVICE__
+float y0f(float __x) { return __ocml_y0_f32(__x); }
+
+__DEVICE__
+float y1f(float __x) { return __ocml_y1_f32(__x); }
+
+__DEVICE__
+float ynf(int __n, float __x) {
+  // TODO: we could use Ahmes multiplication and the Miller & Brown algorithm
+  // for linear recurrences to get O(log n) steps, but it's unclear if it'd
+  // be beneficial in this case. Placeholder until OCML adds support.
+  if (__n == 0)
+    return y0f(__x);
+  if (__n == 1)
+    return y1f(__x);
+
+  float __x0 = y0f(__x);
+  float __x1 = y1f(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    float __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+
+  return __x1;
+}
+
+// BEGIN INTRINSICS
+
+__DEVICE__
+float __cosf(float __x) { return __ocml_native_cos_f32(__x); }
+
+__DEVICE__
+float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); }
+
+__DEVICE__
+float __expf(float __x) { return __ocml_native_exp_f32(__x); }
+
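+// The rounded intrinsics below follow the CUDA-style suffix convention and
+// map onto OCML's rounding modes: _rd rounds down ("rtn"), _rn rounds to
+// nearest even ("rte"), _ru rounds up ("rtp"), and _rz rounds toward zero
+// ("rtz"). Only the _rn forms are defined unless
+// OCML_BASIC_ROUNDED_OPERATIONS is requested.
+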
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fadd_rd(float __x, float __y) { return __ocml_add_rtn_f32(__x, __y); }
+#endif
+__DEVICE__
+float __fadd_rn(float __x, float __y) { return __ocml_add_rte_f32(__x, __y); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fadd_ru(float __x, float __y) { return __ocml_add_rtp_f32(__x, __y); }
+
+__DEVICE__
+float __fadd_rz(float __x, float __y) { return __ocml_add_rtz_f32(__x, __y); }
+
+__DEVICE__
+float __fdiv_rd(float __x, float __y) { return __ocml_div_rtn_f32(__x, __y); }
+#endif
+__DEVICE__
+float __fdiv_rn(float __x, float __y) { return __ocml_div_rte_f32(__x, __y); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fdiv_ru(float __x, float __y) { return __ocml_div_rtp_f32(__x, __y); }
+
+__DEVICE__
+float __fdiv_rz(float __x, float __y) { return __ocml_div_rtz_f32(__x, __y); }
+#endif
+__DEVICE__
+float __fdividef(float __x, float __y) { return __x / __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fmaf_rd(float __x, float __y, float __z) {
+  return __ocml_fma_rtn_f32(__x, __y, __z);
+}
+#endif
+__DEVICE__
+float __fmaf_rn(float __x, float __y, float __z) {
+  return __ocml_fma_rte_f32(__x, __y, __z);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fmaf_ru(float __x, float __y, float __z) {
+  return __ocml_fma_rtp_f32(__x, __y, __z);
+}
+
+__DEVICE__
+float __fmaf_rz(float __x, float __y, float __z) {
+  return __ocml_fma_rtz_f32(__x, __y, __z);
+}
+
+__DEVICE__
+float __fmul_rd(float __x, float __y) { return __ocml_mul_rtn_f32(__x, __y); }
+#endif
+__DEVICE__
+float __fmul_rn(float __x, float __y) { return __ocml_mul_rte_f32(__x, __y); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fmul_ru(float __x, float __y) { return __ocml_mul_rtp_f32(__x, __y); }
+
+__DEVICE__
+float __fmul_rz(float __x, float __y) { return __ocml_mul_rtz_f32(__x, __y); }
+
+__DEVICE__
+float __frcp_rd(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#endif
+__DEVICE__
+float __frcp_rn(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __frcp_ru(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+
+__DEVICE__
+float __frcp_rz(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#endif
+__DEVICE__
+float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }
+#endif
+__DEVICE__
+float __fsqrt_rn(float __x) { return __ocml_sqrt_rte_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }
+
+__DEVICE__
+float __fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); }
+
+__DEVICE__
+float __fsub_rd(float __x, float __y) { return __ocml_sub_rtn_f32(__x, __y); }
+#endif
+__DEVICE__
+float __fsub_rn(float __x, float __y) { return __ocml_sub_rte_f32(__x, __y); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fsub_ru(float __x, float __y) { return __ocml_sub_rtp_f32(__x, __y); }
+
+__DEVICE__
+float __fsub_rz(float __x, float __y) { return __ocml_sub_rtz_f32(__x, __y); }
+#endif
+__DEVICE__
+float __log10f(float __x) { return __ocml_native_log10_f32(__x); }
+
+__DEVICE__
+float __log2f(float __x) { return __ocml_native_log2_f32(__x); }
+
+__DEVICE__
+float __logf(float __x) { return __ocml_native_log_f32(__x); }
+
+__DEVICE__
+float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+
+__DEVICE__
+float __saturatef(float __x) { return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x); }
+
+__DEVICE__
+void __sincosf(float __x, float *__sinptr, float *__cosptr) {
+  *__sinptr = __ocml_native_sin_f32(__x);
+  *__cosptr = __ocml_native_cos_f32(__x);
+}
+
+__DEVICE__
+float __sinf(float __x) { return __ocml_native_sin_f32(__x); }
+
+__DEVICE__
+float __tanf(float __x) { return __ocml_tan_f32(__x); }
+// END INTRINSICS
+// END FLOAT
+
+// BEGIN DOUBLE
+__DEVICE__
+double acos(double __x) { return __ocml_acos_f64(__x); }
+
+__DEVICE__
+double acosh(double __x) { return __ocml_acosh_f64(__x); }
+
+__DEVICE__
+double asin(double __x) { return __ocml_asin_f64(__x); }
+
+__DEVICE__
+double asinh(double __x) { return __ocml_asinh_f64(__x); }
+
+__DEVICE__
+double atan(double __x) { return __ocml_atan_f64(__x); }
+
+__DEVICE__
+double atan2(double __x, double __y) { return __ocml_atan2_f64(__x, __y); }
+
+__DEVICE__
+double atanh(double __x) { return __ocml_atanh_f64(__x); }
+
+__DEVICE__
+double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
+
+__DEVICE__
+double ceil(double __x) { return __ocml_ceil_f64(__x); }
+
+__DEVICE__
+double copysign(double __x, double __y) {
+  return __ocml_copysign_f64(__x, __y);
+}
+
+__DEVICE__
+double cos(double __x) { return __ocml_cos_f64(__x); }
+
+__DEVICE__
+double cosh(double __x) { return __ocml_cosh_f64(__x); }
+
+__DEVICE__
+double cospi(double __x) { return __ocml_cospi_f64(__x); }
+
+__DEVICE__
+double cyl_bessel_i0(double __x) { return __ocml_i0_f64(__x); }
+
+__DEVICE__
+double cyl_bessel_i1(double __x) { return __ocml_i1_f64(__x); }
+
+__DEVICE__
+double erf(double __x) { return __ocml_erf_f64(__x); }
+
+__DEVICE__
+double erfc(double __x) { return __ocml_erfc_f64(__x); }
+
+__DEVICE__
+double erfcinv(double __x) { return __ocml_erfcinv_f64(__x); }
+
+__DEVICE__
+double erfcx(double __x) { return __ocml_erfcx_f64(__x); }
+
+__DEVICE__
+double erfinv(double __x) { return __ocml_erfinv_f64(__x); }
+
+__DEVICE__
+double exp(double __x) { return __ocml_exp_f64(__x); }
+
+__DEVICE__
+double exp10(double __x) { return __ocml_exp10_f64(__x); }
+
+__DEVICE__
+double exp2(double __x) { return __ocml_exp2_f64(__x); }
+
+__DEVICE__
+double expm1(double __x) { return __ocml_expm1_f64(__x); }
+
+__DEVICE__
+double fabs(double __x) { return __ocml_fabs_f64(__x); }
+
+__DEVICE__
+double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
+
+__DEVICE__
+double floor(double __x) { return __ocml_floor_f64(__x); }
+
+__DEVICE__
+double fma(double __x, double __y, double __z) {
+  return __ocml_fma_f64(__x, __y, __z);
+}
+
+__DEVICE__
+double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
+
+__DEVICE__
+double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
+
+__DEVICE__
+double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
+
+__DEVICE__
+double frexp(double __x, int *__nptr) {
+  int __tmp;
+  double __r =
+      __ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp);
+  *__nptr = __tmp;
+  return __r;
+}
+
+__DEVICE__
+double hypot(double __x, double __y) { return __ocml_hypot_f64(__x, __y); }
+
+__DEVICE__
+int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
+
+__DEVICE__
+__RETURN_TYPE __finite(double __x) { return __ocml_isfinite_f64(__x); }
+
+__DEVICE__
+__RETURN_TYPE __isinf(double __x) { return __ocml_isinf_f64(__x); }
+
+__DEVICE__
+__RETURN_TYPE __isnan(double __x) { return __ocml_isnan_f64(__x); }
+
+__DEVICE__
+double j0(double __x) { return __ocml_j0_f64(__x); }
+
+__DEVICE__
+double j1(double __x) { return __ocml_j1_f64(__x); }
+
+__DEVICE__
+double jn(int __n, double __x) {
+  // TODO: we could use Ahmes multiplication and the Miller & Brown algorithm
+  // for linear recurrences to get O(log n) steps, but it's unclear if it'd
+  // be beneficial in this case. Placeholder until OCML adds support.
+  if (__n == 0)
+    return j0(__x);
+  if (__n == 1)
+    return j1(__x);
+
+  double __x0 = j0(__x);
+  double __x1 = j1(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    double __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+  return __x1;
+}
+
+__DEVICE__
+double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
+
+__DEVICE__
+double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
+
+__DEVICE__
+long long int llrint(double __x) { return __ocml_rint_f64(__x); }
+
+__DEVICE__
+long long int llround(double __x) { return __ocml_round_f64(__x); }
+
+__DEVICE__
+double log(double __x) { return __ocml_log_f64(__x); }
+
+__DEVICE__
+double log10(double __x) { return __ocml_log10_f64(__x); }
+
+__DEVICE__
+double log1p(double __x) { return __ocml_log1p_f64(__x); }
+
+__DEVICE__
+double log2(double __x) { return __ocml_log2_f64(__x); }
+
+__DEVICE__
+double logb(double __x) { return __ocml_logb_f64(__x); }
+
+__DEVICE__
+long int lrint(double __x) { return __ocml_rint_f64(__x); }
+
+__DEVICE__
+long int lround(double __x) { return __ocml_round_f64(__x); }
+
+__DEVICE__
+double modf(double __x, double *__iptr) {
+  double __tmp;
+  double __r =
+      __ocml_modf_f64(__x, (__attribute__((address_space(5))) double *)&__tmp);
+  *__iptr = __tmp;
+
+  return __r;
+}
+
+__DEVICE__
+double nan(const char *__tagp) {
+#if !defined(_WIN32)
+  union {
+    double val;
+    struct ieee_double {
+      uint64_t mantissa : 51;
+      uint32_t quiet : 1;
+      uint32_t exponent : 11;
+      uint32_t sign : 1;
+    } bits;
+  } __tmp;
+  __static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits));
+
+  __tmp.bits.sign = 0u;
+  __tmp.bits.exponent = ~0u;
+  __tmp.bits.quiet = 1u;
+  __tmp.bits.mantissa = __make_mantissa(__tagp);
+
+  return __tmp.val;
+#else
+  __static_assert_type_size_equal(sizeof(uint64_t), sizeof(double));
+  uint64_t __val = __make_mantissa(__tagp);
+  __val |= 0xFFFull << 51; // 64-bit literal: shifting a 32-bit int by 51 is UB.
+  return *reinterpret_cast<double *>(&__val);
+#endif
+}
+
+__DEVICE__
+double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
+
+__DEVICE__
+double nextafter(double __x, double __y) {
+  return __ocml_nextafter_f64(__x, __y);
+}
+
+__DEVICE__
+double norm(int __dim,
+            const double *__a) { // TODO: placeholder until OCML adds support.
+  double __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_sqrt_f64(__r);
+}
+
+__DEVICE__
+double norm3d(double __x, double __y, double __z) {
+  return __ocml_len3_f64(__x, __y, __z);
+}
+
+__DEVICE__
+double norm4d(double __x, double __y, double __z, double __w) {
+  return __ocml_len4_f64(__x, __y, __z, __w);
+}
+
+__DEVICE__
+double normcdf(double __x) { return __ocml_ncdf_f64(__x); }
+
+__DEVICE__
+double normcdfinv(double __x) { return __ocml_ncdfinv_f64(__x); }
+
+__DEVICE__
+double pow(double __x, double __y) { return __ocml_pow_f64(__x, __y); }
+
+__DEVICE__
+double powi(double __x, int __y) { return __ocml_pown_f64(__x, __y); }
+
+__DEVICE__
+double rcbrt(double __x) { return __ocml_rcbrt_f64(__x); }
+
+__DEVICE__
+double remainder(double __x, double __y) {
+  return __ocml_remainder_f64(__x, __y);
+}
+
+__DEVICE__
+double remquo(double __x, double __y, int *__quo) {
+  int __tmp;
+  double __r = __ocml_remquo_f64(
+      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
+  *__quo = __tmp;
+
+  return __r;
+}
+
+__DEVICE__
+double rhypot(double __x, double __y) { return __ocml_rhypot_f64(__x, __y); }
+
+__DEVICE__
+double rint(double __x) { return __ocml_rint_f64(__x); }
+
+__DEVICE__
+double rnorm(int __dim,
+             const double *__a) { // TODO: placeholder until OCML adds support.
+  double __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_rsqrt_f64(__r);
+}
+
+__DEVICE__
+double rnorm3d(double __x, double __y, double __z) {
+  return __ocml_rlen3_f64(__x, __y, __z);
+}
+
+__DEVICE__
+double rnorm4d(double __x, double __y, double __z, double __w) {
+  return __ocml_rlen4_f64(__x, __y, __z, __w);
+}
+
+__DEVICE__
+double round(double __x) { return __ocml_round_f64(__x); }
+
+__DEVICE__
+double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
+
+__DEVICE__
+double scalbln(double __x, long int __n) {
+  return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n)
+                         : __ocml_scalb_f64(__x, __n);
+}
+__DEVICE__
+double scalbn(double __x, int __n) { return __ocml_scalbn_f64(__x, __n); }
+
+__DEVICE__
+__RETURN_TYPE __signbit(double __x) { return __ocml_signbit_f64(__x); }
+
+__DEVICE__
+double sin(double __x) { return __ocml_sin_f64(__x); }
+
+__DEVICE__
+void sincos(double __x, double *__sinptr, double *__cosptr) {
+  double __tmp;
+  *__sinptr = __ocml_sincos_f64(
+      __x, (__attribute__((address_space(5))) double *)&__tmp);
+  *__cosptr = __tmp;
+}
+
+__DEVICE__
+void sincospi(double __x, double *__sinptr, double *__cosptr) {
+  double __tmp;
+  *__sinptr = __ocml_sincospi_f64(
+      __x, (__attribute__((address_space(5))) double *)&__tmp);
+  *__cosptr = __tmp;
+}
+
+__DEVICE__
+double sinh(double __x) { return __ocml_sinh_f64(__x); }
+
+__DEVICE__
+double sinpi(double __x) { return __ocml_sinpi_f64(__x); }
+
+__DEVICE__
+double sqrt(double __x) { return __ocml_sqrt_f64(__x); }
+
+__DEVICE__
+double tan(double __x) { return __ocml_tan_f64(__x); }
+
+__DEVICE__
+double tanh(double __x) { return __ocml_tanh_f64(__x); }
+
+__DEVICE__
+double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
+
+__DEVICE__
+double trunc(double __x) { return __ocml_trunc_f64(__x); }
+
+__DEVICE__
+double y0(double __x) { return __ocml_y0_f64(__x); }
+
+__DEVICE__
+double y1(double __x) { return __ocml_y1_f64(__x); }
+
+__DEVICE__
+double yn(int __n, double __x) {
+  // TODO: we could use Ahmes multiplication and the Miller & Brown algorithm
+  // for linear recurrences to get O(log n) steps, but it's unclear if it'd
+  // be beneficial in this case. Placeholder until OCML adds support.
+  if (__n == 0)
+    return y0(__x);
+  if (__n == 1)
+    return y1(__x);
+
+  double __x0 = y0(__x);
+  double __x1 = y1(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    double __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+
+  return __x1;
+}
+
+// BEGIN INTRINSICS
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __dadd_rd(double __x, double __y) {
+  return __ocml_add_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+double __dadd_rn(double __x, double __y) {
+  return __ocml_add_rte_f64(__x, __y);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __dadd_ru(double __x, double __y) {
+  return __ocml_add_rtp_f64(__x, __y);
+}
+
+__DEVICE__
+double __dadd_rz(double __x, double __y) {
+  return __ocml_add_rtz_f64(__x, __y);
+}
+
+__DEVICE__
+double __ddiv_rd(double __x, double __y) {
+  return __ocml_div_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+double __ddiv_rn(double __x, double __y) {
+  return __ocml_div_rte_f64(__x, __y);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __ddiv_ru(double __x, double __y) {
+  return __ocml_div_rtp_f64(__x, __y);
+}
+
+__DEVICE__
+double __ddiv_rz(double __x, double __y) {
+  return __ocml_div_rtz_f64(__x, __y);
+}
+
+__DEVICE__
+double __dmul_rd(double __x, double __y) {
+  return __ocml_mul_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+double __dmul_rn(double __x, double __y) {
+  return __ocml_mul_rte_f64(__x, __y);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __dmul_ru(double __x, double __y) {
+  return __ocml_mul_rtp_f64(__x, __y);
+}
+
+__DEVICE__
+double __dmul_rz(double __x, double __y) {
+  return __ocml_mul_rtz_f64(__x, __y);
+}
+
+__DEVICE__
+double __drcp_rd(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+#endif
+__DEVICE__
+double __drcp_rn(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __drcp_ru(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+
+__DEVICE__
+double __drcp_rz(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+
+__DEVICE__
+double __dsqrt_rd(double __x) { return __ocml_sqrt_rtn_f64(__x); }
+#endif
+__DEVICE__
+double __dsqrt_rn(double __x) { return __ocml_sqrt_rte_f64(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __dsqrt_ru(double __x) { return __ocml_sqrt_rtp_f64(__x); }
+
+__DEVICE__
+double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); }
+
+__DEVICE__
+double __dsub_rd(double __x, double __y) {
+  return __ocml_sub_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+double __dsub_rn(double __x, double __y) {
+  return __ocml_sub_rte_f64(__x, __y);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __dsub_ru(double __x, double __y) {
+  return __ocml_sub_rtp_f64(__x, __y);
+}
+
+__DEVICE__
+double __dsub_rz(double __x, double __y) {
+  return __ocml_sub_rtz_f64(__x, __y);
+}
+
+__DEVICE__
+double __fma_rd(double __x, double __y, double __z) {
+  return __ocml_fma_rtn_f64(__x, __y, __z);
+}
+#endif
+__DEVICE__
+double __fma_rn(double __x, double __y, double __z) {
+  return __ocml_fma_rte_f64(__x, __y, __z);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __fma_ru(double __x, double __y, double __z) {
+  return __ocml_fma_rtp_f64(__x, __y, __z);
+}
+
+__DEVICE__
+double __fma_rz(double __x, double __y, double __z) {
+  return __ocml_fma_rtz_f64(__x, __y, __z);
+}
+#endif
+// END INTRINSICS
+// END DOUBLE
+
+// C only macros
+#if !defined(__cplusplus) && __STDC_VERSION__ >= 201112L
+#define isfinite(__x) _Generic((__x), float : __finitef, double : __finite)(__x)
+#define isinf(__x) _Generic((__x), float : __isinff, double : __isinf)(__x)
+#define isnan(__x) _Generic((__x), float : __isnanf, double : __isnan)(__x)
+#define signbit(__x)                                                           \
+  _Generic((__x), float : __signbitf, double : __signbit)(__x)
+#endif // !defined(__cplusplus) && __STDC_VERSION__ >= 201112L
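+// Example (illustrative, C11): given the macros above, isnan(1.0f) expands to
+//   _Generic((1.0f), float : __isnanf, double : __isnan)(1.0f)
+// and so dispatches to __isnanf, returning int (__RETURN_TYPE in C).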
+
+#if defined(__cplusplus)
+template <class T> __DEVICE__ T min(T __arg1, T __arg2) {
+  return (__arg1 < __arg2) ? __arg1 : __arg2;
+}
+
+template <class T> __DEVICE__ T max(T __arg1, T __arg2) {
+  return (__arg1 > __arg2) ? __arg1 : __arg2;
+}
+
+__DEVICE__ int min(int __arg1, int __arg2) {
+  return (__arg1 < __arg2) ? __arg1 : __arg2;
+}
+__DEVICE__ int max(int __arg1, int __arg2) {
+  return (__arg1 > __arg2) ? __arg1 : __arg2;
+}
+
+__DEVICE__
+float max(float __x, float __y) { return fmaxf(__x, __y); }
+
+__DEVICE__
+double max(double __x, double __y) { return fmax(__x, __y); }
+
+__DEVICE__
+float min(float __x, float __y) { return fminf(__x, __y); }
+
+__DEVICE__
+double min(double __x, double __y) { return fmin(__x, __y); }
+
+__host__ inline static int min(int __arg1, int __arg2) {
+  return std::min(__arg1, __arg2);
+}
+
+__host__ inline static int max(int __arg1, int __arg2) {
+  return std::max(__arg1, __arg2);
+}
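+// Example (illustrative): in device code, min(2, 3) prefers the non-template
+// int overload over min(T, T), while min(2.0f, 3.0f) selects the float
+// overload and lowers to fminf.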
+#endif
+
+#pragma pop_macro("__DEVICE__")
+#pragma pop_macro("__RETURN_TYPE")
+
+#endif // __CLANG_HIP_MATH_H__
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_runtime_wrapper.h b/darwin-x86/lib64/clang/12.0.1/include/__clang_hip_runtime_wrapper.h
similarity index 93%
rename from darwin-x86/lib64/clang/11.0.5/include/__clang_hip_runtime_wrapper.h
rename to darwin-x86/lib64/clang/12.0.1/include/__clang_hip_runtime_wrapper.h
index addae56..81a16a2 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_runtime_wrapper.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/__clang_hip_runtime_wrapper.h
@@ -28,6 +28,10 @@
 #define __shared__ __attribute__((shared))
 #define __constant__ __attribute__((constant))
 
+#if !defined(__cplusplus) || __cplusplus < 201103L
+  #define nullptr NULL
+#endif
+
 #if __HIP_ENABLE_DEVICE_MALLOC__
 extern "C" __device__ void *__hip_malloc(size_t __size);
 extern "C" __device__ void *__hip_free(void *__ptr);
@@ -51,6 +55,7 @@
 
 #if !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
 #include <__clang_cuda_math_forward_declares.h>
+#include <__clang_hip_cmath.h>
 #include <__clang_cuda_complex_builtins.h>
 
 #include <algorithm>
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__stddef_max_align_t.h b/darwin-x86/lib64/clang/12.0.1/include/__stddef_max_align_t.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/__stddef_max_align_t.h
rename to darwin-x86/lib64/clang/12.0.1/include/__stddef_max_align_t.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__wmmintrin_aes.h b/darwin-x86/lib64/clang/12.0.1/include/__wmmintrin_aes.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/__wmmintrin_aes.h
rename to darwin-x86/lib64/clang/12.0.1/include/__wmmintrin_aes.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__wmmintrin_pclmul.h b/darwin-x86/lib64/clang/12.0.1/include/__wmmintrin_pclmul.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/__wmmintrin_pclmul.h
rename to darwin-x86/lib64/clang/12.0.1/include/__wmmintrin_pclmul.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/adxintrin.h b/darwin-x86/lib64/clang/12.0.1/include/adxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/adxintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/adxintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/altivec.h b/darwin-x86/lib64/clang/12.0.1/include/altivec.h
similarity index 94%
rename from darwin-x86/lib64/clang/11.0.5/include/altivec.h
rename to darwin-x86/lib64/clang/12.0.1/include/altivec.h
index 9a40092..1d7bc20 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/altivec.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/altivec.h
@@ -1709,6 +1709,20 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpeq(vector signed __int128 __a, vector signed __int128 __b) {
+  return (vector bool __int128)__builtin_altivec_vcmpequq(
+      (vector bool __int128)__a, (vector bool __int128)__b);
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpeq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return (vector bool __int128)__builtin_altivec_vcmpequq(
+      (vector bool __int128)__a, (vector bool __int128)__b);
+}
+#endif
+
 #ifdef __POWER9_VECTOR__
 /* vec_cmpne */
 
@@ -1766,36 +1780,26 @@
                                                     (vector int)__b);
 }
 
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
 static __inline__ vector bool int __ATTRS_o_ai
 vec_cmpne(vector float __a, vector float __b) {
   return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
                                                     (vector int)__b);
 }
 
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector double __a, vector double __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpne(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
+      (vector bool __int128)__a, (vector bool __int128)__b));
 }
 
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpne(vector signed __int128 __a, vector signed __int128 __b) {
+  return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
+      (vector bool __int128)__a, (vector bool __int128)__b));
+}
+#endif
+
 /* vec_cmpnez */
 
 static __inline__ vector bool char __ATTRS_o_ai
@@ -1900,6 +1904,86 @@
   return __builtin_altivec_vprtybd(__a);
 }
 
+#else
+/* vec_cmpne */
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpne(vector bool char __a, vector bool char __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpne(vector signed char __a, vector signed char __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpne(vector unsigned char __a, vector unsigned char __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpne(vector bool short __a, vector bool short __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpne(vector signed short __a, vector signed short __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpne(vector unsigned short __a, vector unsigned short __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector bool int __a, vector bool int __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector signed int __a, vector signed int __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector unsigned int __a, vector unsigned int __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector float __a, vector float __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector bool long long __a, vector bool long long __b) {
+  return (vector bool long long)
+    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector signed long long __a, vector signed long long __b) {
+  return (vector bool long long)
+    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) {
+  return (vector bool long long)
+    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
+#endif
+
+#ifdef __VSX__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector double __a, vector double __b) {
+  return (vector bool long long)
+    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
 #endif
 
 /* vec_cmpgt */
@@ -1962,6 +2046,20 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpgt(vector signed __int128 __a, vector signed __int128 __b) {
+  return (vector bool __int128)__builtin_altivec_vcmpgtsq(
+      (vector bool __int128)__a, (vector bool __int128)__b);
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpgt(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return (vector bool __int128)__builtin_altivec_vcmpgtuq(
+      (vector bool __int128)__a, (vector bool __int128)__b);
+}
+#endif
+
 /* vec_cmpge */
 
 static __inline__ vector bool char __ATTRS_o_ai
@@ -2022,6 +2120,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpge(vector signed __int128 __a, vector signed __int128 __b) {
+  return ~(vec_cmpgt(__b, __a));
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpge(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return ~(vec_cmpgt(__b, __a));
+}
+#endif
+
 /* vec_vcmpgefp */
 
 static __inline__ vector bool int __attribute__((__always_inline__))
@@ -2134,6 +2244,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmple(vector signed __int128 __a, vector signed __int128 __b) {
+  return vec_cmpge(__b, __a);
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmple(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return vec_cmpge(__b, __a);
+}
+#endif
+
 /* vec_cmplt */
 
 static __inline__ vector bool char __ATTRS_o_ai
@@ -2178,6 +2300,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmplt(vector signed __int128 __a, vector signed __int128 __b) {
+  return vec_cmpgt(__b, __a);
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmplt(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return vec_cmpgt(__b, __a);
+}
+#endif
+
 #ifdef __POWER8_VECTOR__
 static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmplt(vector signed long long __a, vector signed long long __b) {
@@ -2702,67 +2836,67 @@
 }
 
 #if defined(__powerpc64__)
-static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(signed char *__a,
+static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(const signed char *__a,
                                                              size_t __b) {
   return (vector signed char)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_len(unsigned char *__a, size_t __b) {
+vec_xl_len(const unsigned char *__a, size_t __b) {
   return (vector unsigned char)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
-static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(signed short *__a,
+static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(const signed short *__a,
                                                               size_t __b) {
   return (vector signed short)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xl_len(unsigned short *__a, size_t __b) {
+vec_xl_len(const unsigned short *__a, size_t __b) {
   return (vector unsigned short)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
-static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(signed int *__a,
+static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(const signed int *__a,
                                                             size_t __b) {
   return (vector signed int)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
-static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(unsigned int *__a,
+static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(const unsigned int *__a,
                                                               size_t __b) {
   return (vector unsigned int)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
-static __inline__ vector float __ATTRS_o_ai vec_xl_len(float *__a, size_t __b) {
+static __inline__ vector float __ATTRS_o_ai vec_xl_len(const float *__a, size_t __b) {
   return (vector float)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
 static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_xl_len(signed __int128 *__a, size_t __b) {
+vec_xl_len(const signed __int128 *__a, size_t __b) {
   return (vector signed __int128)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_len(unsigned __int128 *__a, size_t __b) {
+vec_xl_len(const unsigned __int128 *__a, size_t __b) {
   return (vector unsigned __int128)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
 static __inline__ vector signed long long __ATTRS_o_ai
-vec_xl_len(signed long long *__a, size_t __b) {
+vec_xl_len(const signed long long *__a, size_t __b) {
   return (vector signed long long)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xl_len(unsigned long long *__a, size_t __b) {
+vec_xl_len(const unsigned long long *__a, size_t __b) {
   return (vector unsigned long long)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
-static __inline__ vector double __ATTRS_o_ai vec_xl_len(double *__a,
+static __inline__ vector double __ATTRS_o_ai vec_xl_len(const double *__a,
                                                         size_t __b) {
   return (vector double)__builtin_vsx_lxvl(__a, (__b << 56));
 }
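
A sketch of the length-controlled load, assuming a 64-bit POWER9-class target;
load_n is an illustrative name:

    #include <altivec.h>
    #include <stddef.h>

    /* Fetch only n bytes (0..16); the rest of the result is zero, so a
       short buffer can be read without running past its end. */
    vector unsigned char load_n(const unsigned char *p, size_t n) {
      return vec_xl_len(p, n);
    }
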
 
 static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_len_r(unsigned char *__a, size_t __b) {
+vec_xl_len_r(const unsigned char *__a, size_t __b) {
   vector unsigned char __res =
       (vector unsigned char)__builtin_vsx_lxvll(__a, (__b << 56));
 #ifdef __LITTLE_ENDIAN__
@@ -2951,6 +3085,42 @@
 
 #define vec_vctuxs __builtin_altivec_vctuxs
 
+/* vec_signext */
+
+#ifdef __POWER9_VECTOR__
+static __inline__ vector signed int __ATTRS_o_ai
+vec_signexti(vector signed char __a) {
+  return __builtin_altivec_vextsb2w(__a);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_signexti(vector signed short __a) {
+  return __builtin_altivec_vextsh2w(__a);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_signextll(vector signed char __a) {
+  return __builtin_altivec_vextsb2d(__a);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_signextll(vector signed short __a) {
+  return __builtin_altivec_vextsh2d(__a);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_signextll(vector signed int __a) {
+  return __builtin_altivec_vextsw2d(__a);
+}
+#endif
+
+#ifdef __POWER10_VECTOR__
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_signextq(vector signed long long __a) {
+  return __builtin_altivec_vextsd2q(__a);
+}
+#endif
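
A sketch of the sign-extension helpers, assuming -mcpu=power9; the function
names are illustrative:

    #include <altivec.h>

    /* Sign-extend the low byte of each word / of each doubleword. */
    vector signed int widen_b2w(vector signed char a) {
      return vec_signexti(a);
    }
    vector signed long long widen_b2d(vector signed char a) {
      return vec_signextll(a);
    }
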
+
 /* vec_signed */
 
 static __inline__ vector signed int __ATTRS_o_ai
@@ -3288,6 +3458,66 @@
 }
 #endif
 
+/* vec_dive */
+
+#ifdef __POWER10_VECTOR__
+static __inline__ vector signed int __ATTRS_o_ai
+vec_dive(vector signed int __a, vector signed int __b) {
+  return __builtin_altivec_vdivesw(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_dive(vector unsigned int __a, vector unsigned int __b) {
+  return __builtin_altivec_vdiveuw(__a, __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_dive(vector signed long long __a, vector signed long long __b) {
+  return __builtin_altivec_vdivesd(__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_dive(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vdiveud(__a, __b);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_dive(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __builtin_altivec_vdiveuq(__a, __b);
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_dive(vector signed __int128 __a, vector signed __int128 __b) {
+  return __builtin_altivec_vdivesq(__a, __b);
+}
+#endif
+
+#ifdef __POWER10_VECTOR__
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_div(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __a / __b;
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_div(vector signed __int128 __a, vector signed __int128 __b) {
+  return __a / __b;
+}
+#endif /* __POWER10_VECTOR__ */
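
A sketch of both divide flavors, assuming -mcpu=power10; per the ISA's
divide-extended definition, vec_dive should yield ((a << 32) / b) per word,
while vec_div is the plain lane-wise quotient:

    #include <altivec.h>

    vector signed int div_extended(vector signed int a, vector signed int b) {
      return vec_dive(a, b); /* extended divide (vdivesw) */
    }
    vector unsigned __int128 div_u128(vector unsigned __int128 a,
                                      vector unsigned __int128 b) {
      return vec_div(a, b);  /* ordinary 128-bit division */
    }
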
+
+/* vec_xvtdiv */
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_test_swdiv(vector double __a,
+                                                  vector double __b) {
+  return __builtin_vsx_xvtdivdp(__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_test_swdivs(vector float __a,
+                                                   vector float __b) {
+  return __builtin_vsx_xvtdivsp(__a, __b);
+}
+#endif
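
A sketch of the software-divide test, assuming VSX; my reading is that the
xvtdivdp condition flags inputs (zeros, infinities, extreme exponents) that
would need a slower software-assisted divide:

    #include <altivec.h>

    int needs_slow_divide(vector double a, vector double b) {
      return vec_test_swdiv(a, b);
    }
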
+
 /* vec_dss */
 
 #define vec_dss __builtin_altivec_dss
@@ -3300,23 +3530,19 @@
 
 /* vec_dst */
 #define vec_dst(__PTR, __CW, __STR) \
-  __extension__(                    \
-      { __builtin_altivec_dst((const void *)(__PTR), (__CW), (__STR)); })
+  __builtin_altivec_dst((const void *)(__PTR), (__CW), (__STR))
 
 /* vec_dstst */
 #define vec_dstst(__PTR, __CW, __STR) \
-  __extension__(                      \
-      { __builtin_altivec_dstst((const void *)(__PTR), (__CW), (__STR)); })
+  __builtin_altivec_dstst((const void *)(__PTR), (__CW), (__STR))
 
 /* vec_dststt */
 #define vec_dststt(__PTR, __CW, __STR) \
-  __extension__(                       \
-      { __builtin_altivec_dststt((const void *)(__PTR), (__CW), (__STR)); })
+  __builtin_altivec_dststt((const void *)(__PTR), (__CW), (__STR))
 
 /* vec_dstt */
 #define vec_dstt(__PTR, __CW, __STR) \
-  __extension__(                     \
-      { __builtin_altivec_dstt((const void *)(__PTR), (__CW), (__STR)); })
+  __builtin_altivec_dstt((const void *)(__PTR), (__CW), (__STR))
 
 /* vec_eqv */
 
@@ -5467,6 +5693,16 @@
   return __builtin_altivec_vmsumuhm(__a, __b, __c);
 }
 
+/* vec_msumc */
+
+#ifdef __POWER10_VECTOR__
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_msumc(vector unsigned long long __a, vector unsigned long long __b,
+          vector unsigned __int128 __c) {
+  return __builtin_altivec_vmsumcud(__a, __b, __c);
+}
+#endif
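
A sketch of the carrying multiply-sum, assuming -mcpu=power10; vmsumcud
produces the carry-out half when chaining 128-bit-wide accumulations:

    #include <altivec.h>

    vector unsigned __int128 msum_carry(vector unsigned long long a,
                                        vector unsigned long long b,
                                        vector unsigned __int128 acc) {
      return vec_msumc(a, b, acc);
    }
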
+
 /* vec_vmsummbm */
 
 static __inline__ vector int __attribute__((__always_inline__))
@@ -5693,6 +5929,26 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_mule(vector signed long long __a, vector signed long long __b) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vmulosd(__a, __b);
+#else
+  return __builtin_altivec_vmulesd(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_mule(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vmuloud(__a, __b);
+#else
+  return __builtin_altivec_vmuleud(__a, __b);
+#endif
+}
+#endif
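
A sketch of the even-element widening multiply, assuming -mcpu=power10; the
LE/BE branches above pick the builtin so element numbering stays consistent:

    #include <altivec.h>

    /* Full 64x64 -> 128-bit products of the even-numbered elements. */
    vector signed __int128 prod_even(vector signed long long a,
                                     vector signed long long b) {
      return vec_mule(a, b);
    }
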
+
 /* vec_vmulesb */
 
 static __inline__ vector short __attribute__((__always_inline__))
@@ -5737,6 +5993,30 @@
 #endif
 }
 
+/* vec_mulh */
+
+#ifdef __POWER10_VECTOR__
+static __inline__ vector signed int __ATTRS_o_ai
+vec_mulh(vector signed int __a, vector signed int __b) {
+  return __builtin_altivec_vmulhsw(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mulh(vector unsigned int __a, vector unsigned int __b) {
+  return __builtin_altivec_vmulhuw(__a, __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mulh(vector signed long long __a, vector signed long long __b) {
+  return __builtin_altivec_vmulhsd(__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mulh(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vmulhud(__a, __b);
+}
+#endif
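
A sketch of the high-half multiply, assuming -mcpu=power10; useful for
fixed-point rescaling where only the top 64 bits of each product matter:

    #include <altivec.h>

    vector unsigned long long mul_high(vector unsigned long long a,
                                       vector unsigned long long b) {
      return vec_mulh(a, b);
    }
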
+
 /* vec_mulo */
 
 static __inline__ vector short __ATTRS_o_ai vec_mulo(vector signed char __a,
@@ -5795,6 +6075,26 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_mulo(vector signed long long __a, vector signed long long __b) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vmulesd(__a, __b);
+#else
+  return __builtin_altivec_vmulosd(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_mulo(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vmuleud(__a, __b);
+#else
+  return __builtin_altivec_vmuloud(__a, __b);
+#endif
+}
+#endif
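
The odd-element counterpart of vec_mule, under the same assumptions as above:

    #include <altivec.h>

    vector unsigned __int128 prod_odd(vector unsigned long long a,
                                      vector unsigned long long b) {
      return vec_mulo(a, b);
    }
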
+
 /* vec_vmulosb */
 
 static __inline__ vector short __attribute__((__always_inline__))
@@ -7771,6 +8071,18 @@
   return __builtin_altivec_vrsqrtefp(__a);
 }
 
+/* vec_xvtsqrt */
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_test_swsqrt(vector double __a) {
+  return __builtin_vsx_xvtsqrtdp(__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_test_swsqrts(vector float __a) {
+  return __builtin_vsx_xvtsqrtsp(__a);
+}
+#endif
+
 /* vec_sel */
 
 #define __builtin_altivec_vsel_4si vec_sel
@@ -13900,6 +14212,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+}
+#endif
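
A sketch of the whole-vector predicate form, assuming -mcpu=power10; the
__CR6_LT flavor returns 1 only when every lane compares equal:

    #include <altivec.h>

    int same_u128(vector unsigned __int128 a, vector unsigned __int128 b) {
      return vec_all_eq(a, b);
    }
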
+
 /* vec_all_ge */
 
 static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a,
@@ -14071,6 +14395,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_EQ, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_EQ, __b, __a);
+}
+#endif
+
 /* vec_all_gt */
 
 static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a,
@@ -14242,6 +14578,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_LT, __a, __b);
+}
+#endif
+
 /* vec_all_in */
 
 static __inline__ int __attribute__((__always_inline__))
@@ -14421,6 +14769,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_le(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_EQ, __a, __b);
+}
+#endif
+
 /* vec_all_lt */
 
 static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a,
@@ -14593,6 +14953,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_LT, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_LT, __b, __a);
+}
+#endif
+
 /* vec_all_nan */
 
 static __inline__ int __ATTRS_o_ai vec_all_nan(vector float __a) {
@@ -14797,6 +15169,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+}
+#endif
+
 /* vec_all_nge */
 
 static __inline__ int __ATTRS_o_ai vec_all_nge(vector float __a,
@@ -15042,6 +15426,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
 /* vec_any_ge */
 
 static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a,
@@ -15221,6 +15617,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_LT_REV, __b, __a);
+}
+#endif
+
 /* vec_any_gt */
 
 static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a,
@@ -15400,6 +15808,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
 /* vec_any_le */
 
 static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a,
@@ -15579,6 +15999,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
 /* vec_any_lt */
 
 static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a,
@@ -15758,6 +16190,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_EQ_REV, __b, __a);
+}
+#endif
+
 /* vec_any_nan */
 
 static __inline__ int __attribute__((__always_inline__))
@@ -15953,6 +16397,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
 /* vec_any_nge */
 
 static __inline__ int __attribute__((__always_inline__))
@@ -16353,41 +16809,41 @@
 typedef vector float unaligned_vec_float __attribute__((aligned(1)));
 
 static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset,
-                                                     signed char *__ptr) {
+                                                     const signed char *__ptr) {
   return *(unaligned_vec_schar *)(__ptr + __offset);
 }
 
 static inline __ATTRS_o_ai vector unsigned char
-vec_xl(signed long long __offset, unsigned char *__ptr) {
+vec_xl(signed long long __offset, const unsigned char *__ptr) {
   return *(unaligned_vec_uchar*)(__ptr + __offset);
 }
 
 static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
-                                                      signed short *__ptr) {
+                                                      const signed short *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_sshort *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned short
-vec_xl(signed long long __offset, unsigned short *__ptr) {
+vec_xl(signed long long __offset, const unsigned short *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_ushort *)__addr;
 }
 
 static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
-                                                    signed int *__ptr) {
+                                                    const signed int *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_sint *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
-                                                      unsigned int *__ptr) {
+                                                      const unsigned int *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_uint *)__addr;
 }
 
 static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
-                                               float *__ptr) {
+                                               const float *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_float *)__addr;
 }
@@ -16398,19 +16854,19 @@
 typedef vector double unaligned_vec_double __attribute__((aligned(1)));
 
 static inline __ATTRS_o_ai vector signed long long
-vec_xl(signed long long __offset, signed long long *__ptr) {
+vec_xl(signed long long __offset, const signed long long *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_sll *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned long long
-vec_xl(signed long long __offset, unsigned long long *__ptr) {
+vec_xl(signed long long __offset, const unsigned long long *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_ull *)__addr;
 }
 
 static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
-                                                double *__ptr) {
+                                                const double *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_double *)__addr;
 }
@@ -16421,13 +16877,13 @@
 typedef vector unsigned __int128 unaligned_vec_ui128
     __attribute__((aligned(1)));
 static inline __ATTRS_o_ai vector signed __int128
-vec_xl(signed long long __offset, signed __int128 *__ptr) {
+vec_xl(signed long long __offset, const signed __int128 *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_si128 *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned __int128
-vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
+vec_xl(signed long long __offset, const unsigned __int128 *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_ui128 *)__addr;
 }
@@ -16437,71 +16893,71 @@
 
 #ifdef __LITTLE_ENDIAN__
 static __inline__ vector signed char __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed char *__ptr) {
+vec_xl_be(signed long long __offset, const signed char *__ptr) {
   vector signed char __vec = (vector signed char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
                                  13, 12, 11, 10, 9, 8);
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned char *__ptr) {
+vec_xl_be(signed long long __offset, const unsigned char *__ptr) {
   vector unsigned char __vec = (vector unsigned char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
                                  13, 12, 11, 10, 9, 8);
 }
 
 static __inline__ vector signed short  __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed short *__ptr) {
+vec_xl_be(signed long long __offset, const signed short *__ptr) {
   vector signed short __vec = (vector signed short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned short *__ptr) {
+vec_xl_be(signed long long __offset, const unsigned short *__ptr) {
   vector unsigned short __vec = (vector unsigned short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
 }
 
 static __inline__ vector signed int __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, signed int *__ptr) {
+vec_xl_be(signed long long  __offset, const signed int *__ptr) {
   return (vector signed int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
 }
 
 static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, unsigned int *__ptr) {
+vec_xl_be(signed long long  __offset, const unsigned int *__ptr) {
   return (vector unsigned int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
 }
 
 static __inline__ vector float __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, float *__ptr) {
+vec_xl_be(signed long long  __offset, const float *__ptr) {
   return (vector float)__builtin_vsx_lxvw4x_be(__offset, __ptr);
 }
 
 #ifdef __VSX__
 static __inline__ vector signed long long __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, signed long long *__ptr) {
+vec_xl_be(signed long long  __offset, const signed long long *__ptr) {
   return (vector signed long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, unsigned long long *__ptr) {
+vec_xl_be(signed long long  __offset, const unsigned long long *__ptr) {
   return (vector unsigned long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
 }
 
 static __inline__ vector double __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, double *__ptr) {
+vec_xl_be(signed long long  __offset, const double *__ptr) {
   return (vector double)__builtin_vsx_lxvd2x_be(__offset, __ptr);
 }
 #endif
 
 #if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
 static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, signed __int128 *__ptr) {
+vec_xl_be(signed long long  __offset, const signed __int128 *__ptr) {
   return vec_xl(__offset, __ptr);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, unsigned __int128 *__ptr) {
+vec_xl_be(signed long long  __offset, const unsigned __int128 *__ptr) {
   return vec_xl(__offset, __ptr);
 }
 #endif
@@ -16509,6 +16965,54 @@
   #define vec_xl_be vec_xl
 #endif
 
+#if defined(__POWER10_VECTOR__) && defined(__VSX__)
+
+/* vec_xl_sext */
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(signed long long __offset, const signed char *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(signed long long __offset, const signed short *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(signed long long __offset, const signed int *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(signed long long __offset, const signed long long *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+/* vec_xl_zext */
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(signed long long __offset, const unsigned char *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(signed long long __offset, const unsigned short *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(signed long long __offset, const unsigned int *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(signed long long __offset, const unsigned long long *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+#endif
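
A sketch of the extending loads, assuming -mcpu=power10 and a 64-bit target;
note the overloads above nominally return an unsigned vector even for the
sign-extending form:

    #include <altivec.h>

    /* Load one 32-bit int and sign-extend it into the 128-bit lane. */
    vector unsigned __int128 load_sext_word(const signed int *p) {
      return vec_xl_sext(0, p);
    }
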
+
 /* vec_xst */
 
 static inline __ATTRS_o_ai void vec_xst(vector signed char __vec,
@@ -16597,6 +17101,58 @@
 }
 #endif
 
+/* vec_xst_trunc */
+
+#if defined(__POWER10_VECTOR__) && defined(__VSX__)
+static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
+                                              signed long long __offset,
+                                              signed char *__ptr) {
+  *(__ptr + __offset) = (signed char)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
+                                              signed long long __offset,
+                                              unsigned char *__ptr) {
+  *(__ptr + __offset) = (unsigned char)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
+                                              signed long long __offset,
+                                              signed short *__ptr) {
+  *(__ptr + __offset) = (signed short)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
+                                              signed long long __offset,
+                                              unsigned short *__ptr) {
+  *(__ptr + __offset) = (unsigned short)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
+                                              signed long long __offset,
+                                              signed int *__ptr) {
+  *(__ptr + __offset) = (signed int)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
+                                              signed long long __offset,
+                                              unsigned int *__ptr) {
+  *(__ptr + __offset) = (unsigned int)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
+                                              signed long long __offset,
+                                              signed long long *__ptr) {
+  *(__ptr + __offset) = (signed long long)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
+                                              signed long long __offset,
+                                              unsigned long long *__ptr) {
+  *(__ptr + __offset) = (unsigned long long)__vec[0];
+}
+#endif
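
A sketch of the truncating store, under the same assumptions; element 0 of the
quadword vector is truncated to the pointee width and stored:

    #include <altivec.h>

    void store_low_word(vector unsigned __int128 v, unsigned int *p) {
      vec_xst_trunc(v, 0, p);
    }
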
+
 /* vec_xst_be */
 
 #ifdef __LITTLE_ENDIAN__
@@ -16763,6 +17319,100 @@
 }
 
 #ifdef __POWER10_VECTOR__
+
+/* vec_extractm */
+
+static __inline__ unsigned int __ATTRS_o_ai
+vec_extractm(vector unsigned char __a) {
+  return __builtin_altivec_vextractbm(__a);
+}
+
+static __inline__ unsigned int __ATTRS_o_ai
+vec_extractm(vector unsigned short __a) {
+  return __builtin_altivec_vextracthm(__a);
+}
+
+static __inline__ unsigned int __ATTRS_o_ai
+vec_extractm(vector unsigned int __a) {
+  return __builtin_altivec_vextractwm(__a);
+}
+
+static __inline__ unsigned int __ATTRS_o_ai
+vec_extractm(vector unsigned long long __a) {
+  return __builtin_altivec_vextractdm(__a);
+}
+
+static __inline__ unsigned int __ATTRS_o_ai
+vec_extractm(vector unsigned __int128 __a) {
+  return __builtin_altivec_vextractqm(__a);
+}
+
+/* vec_expandm */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_expandm(vector unsigned char __a) {
+  return __builtin_altivec_vexpandbm(__a);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_expandm(vector unsigned short __a) {
+  return __builtin_altivec_vexpandhm(__a);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_expandm(vector unsigned int __a) {
+  return __builtin_altivec_vexpandwm(__a);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_expandm(vector unsigned long long __a) {
+  return __builtin_altivec_vexpanddm(__a);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_expandm(vector unsigned __int128 __a) {
+  return __builtin_altivec_vexpandqm(__a);
+}
+
+/* vec_cntm */
+
+#define vec_cntm(__a, __mp)                                                    \
+  _Generic((__a), vector unsigned char                                         \
+           : __builtin_altivec_vcntmbb((__a), (unsigned int)(__mp)),           \
+             vector unsigned short                                             \
+           : __builtin_altivec_vcntmbh((__a), (unsigned int)(__mp)),           \
+             vector unsigned int                                               \
+           : __builtin_altivec_vcntmbw((__a), (unsigned int)(__mp)),           \
+             vector unsigned long long                                         \
+           : __builtin_altivec_vcntmbd((__a), (unsigned int)(__mp)))
+
+/* vec_gen[b|h|w|d|q]m */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_genbm(unsigned long long __bm) {
+  return __builtin_altivec_mtvsrbm(__bm);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_genhm(unsigned long long __bm) {
+  return __builtin_altivec_mtvsrhm(__bm);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_genwm(unsigned long long __bm) {
+  return __builtin_altivec_mtvsrwm(__bm);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_gendm(unsigned long long __bm) {
+  return __builtin_altivec_mtvsrdm(__bm);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_genqm(unsigned long long __bm) {
+  return __builtin_altivec_mtvsrqm(__bm);
+}
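
A sketch tying the mask helpers together, assuming -mcpu=power10: vec_genbm
expands a scalar bitmask to per-byte lanes, and vec_extractm gathers each
element's most-significant bit back into a scalar:

    #include <altivec.h>

    unsigned int mask_roundtrip(unsigned long long bits) {
      vector unsigned char lanes = vec_genbm(bits);
      return vec_extractm(lanes);
    }
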
+
 /* vec_pdep */
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
@@ -16881,6 +17531,38 @@
   return __builtin_altivec_vctzdm(__a, __b);
 }
 
+/* vec_mod */
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_mod(vector signed int __a, vector signed int __b) {
+  return __a % __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mod(vector unsigned int __a, vector unsigned int __b) {
+  return __a % __b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mod(vector signed long long __a, vector signed long long __b) {
+  return __a % __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mod(vector unsigned long long __a, vector unsigned long long __b) {
+  return __a % __b;
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_mod(vector signed __int128 __a, vector signed __int128 __b) {
+  return __a % __b;
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_mod(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __a % __b;
+}
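
A sketch of the lane-wise remainder, assuming -mcpu=power10, where the %
operator lowers to the new vmod* instructions:

    #include <altivec.h>

    vector unsigned int mod_u32(vector unsigned int a, vector unsigned int b) {
      return vec_mod(a, b);
    }
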
+
 /* vec_sldbi */
 
 #define vec_sldb(__a, __b, __c) __builtin_altivec_vsldbi(__a, __b, (__c & 0x7))
@@ -17027,6 +17709,92 @@
 #endif
 }
 
+/* vec_extractl */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl(
+    vector unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextdubvrx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextdubvlx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl(
+    vector unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextduhvrx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextduhvlx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl(
+    vector unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextduwvrx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextduwvlx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_extractl(vector unsigned long long __a, vector unsigned long long __b,
+             unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextddvrx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextddvlx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+/* vec_extracth */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth(
+    vector unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextdubvlx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextdubvrx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth(
+    vector unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextduhvlx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextduhvrx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth(
+    vector unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextduwvlx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextduwvrx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_extracth(vector unsigned long long __a, vector unsigned long long __b,
+             unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextddvlx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextddvrx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
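
A sketch of the pairwise extraction, assuming -mcpu=power10; element __c is
selected from the concatenated a:b pair, and the big-endian paths above
re-position the result with vec_sld:

    #include <altivec.h>

    vector unsigned long long pick_low(vector unsigned char a,
                                       vector unsigned char b,
                                       unsigned int c) {
      return vec_extractl(a, b, c);
    }
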
+
 #ifdef __VSX__
 
 /* vec_permx */
@@ -17095,6 +17863,14 @@
   return __builtin_vsx_xxblendvd(__a, __b, __c);
 }
 
+/* vec_replace_elt */
+
+#define vec_replace_elt __builtin_altivec_vec_replace_elt
+
+/* vec_replace_unaligned */
+
+#define vec_replace_unaligned __builtin_altivec_vec_replace_unaligned
+
 /* vec_splati */
 
 #define vec_splati(__a)                                                        \
@@ -17146,7 +17922,212 @@
 #endif
   return __a;
 }
+
+/* vec_test_lsbb_all_ones */
+
+static __inline__ int __ATTRS_o_ai
+vec_test_lsbb_all_ones(vector unsigned char __a) {
+  return __builtin_vsx_xvtlsbb(__a, 1);
+}
+
+/* vec_test_lsbb_all_zeros */
+
+static __inline__ int __ATTRS_o_ai
+vec_test_lsbb_all_zeros(vector unsigned char __a) {
+  return __builtin_vsx_xvtlsbb(__a, 0);
+}
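
A sketch of the LSB test, assuming VSX on a POWER10-class target; it inspects
the least-significant bit of every byte at once:

    #include <altivec.h>

    int all_bytes_odd(vector unsigned char a) {
      return vec_test_lsbb_all_ones(a);
    }
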
 #endif /* __VSX__ */
+
+/* vec_stril */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_stril(vector unsigned char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribr((vector signed char)__a);
+#else
+  return __builtin_altivec_vstribl((vector signed char)__a);
+#endif
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_stril(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribr(__a);
+#else
+  return __builtin_altivec_vstribl(__a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_stril(vector unsigned short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihr((vector signed short)__a);
+#else
+  return __builtin_altivec_vstrihl((vector signed short)__a);
+#endif
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_stril(vector signed short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihr(__a);
+#else
+  return __builtin_altivec_vstrihl(__a);
+#endif
+}
+
+/* vec_stril_p */
+
+static __inline__ int __ATTRS_o_ai vec_stril_p(vector unsigned char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
+#else
+  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
+#else
+  return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_stril_p(vector unsigned short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihr_p(__CR6_EQ, (vector signed short)__a);
+#else
+  return __builtin_altivec_vstrihl_p(__CR6_EQ, (vector signed short)__a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihr_p(__CR6_EQ, __a);
+#else
+  return __builtin_altivec_vstrihl_p(__CR6_EQ, __a);
+#endif
+}
+
+/* vec_strir */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_strir(vector unsigned char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribl((vector signed char)__a);
+#else
+  return __builtin_altivec_vstribr((vector signed char)__a);
+#endif
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_strir(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribl(__a);
+#else
+  return __builtin_altivec_vstribr(__a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_strir(vector unsigned short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihl((vector signed short)__a);
+#else
+  return __builtin_altivec_vstrihr((vector signed short)__a);
+#endif
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_strir(vector signed short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihl(__a);
+#else
+  return __builtin_altivec_vstrihr(__a);
+#endif
+}
+
+/* vec_strir_p */
+
+static __inline__ int __ATTRS_o_ai vec_strir_p(vector unsigned char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
+#else
+  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_strir_p(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
+#else
+  return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_strir_p(vector unsigned short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihl_p(__CR6_EQ, (vector signed short)__a);
+#else
+  return __builtin_altivec_vstrihr_p(__CR6_EQ, (vector signed short)__a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_strir_p(vector signed short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihl_p(__CR6_EQ, __a);
+#else
+  return __builtin_altivec_vstrihr_p(__CR6_EQ, __a);
+#endif
+}
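
A sketch of the string-isolate helpers, assuming -mcpu=power10: vec_stril
zeroes every byte after the first NUL (reading left to right), and the _p form
reports via CR6 whether a NUL was present:

    #include <altivec.h>

    vector unsigned char clamp_cstr(vector unsigned char s) {
      return vec_stril(s);
    }
    int has_nul(vector unsigned char s) { return vec_stril_p(s); }
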
+
+/* vs[l | r | ra] */
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sl(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __a << (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                                  __CHAR_BIT__));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sl(vector signed __int128 __a, vector unsigned __int128 __b) {
+  return __a << (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                                  __CHAR_BIT__));
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sr(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __a >> (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                                  __CHAR_BIT__));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sr(vector signed __int128 __a, vector unsigned __int128 __b) {
+  return (
+      vector signed __int128)(((vector unsigned __int128)__a) >>
+                              (__b %
+                               (vector unsigned __int128)(sizeof(
+                                                              unsigned __int128) *
+                                                          __CHAR_BIT__)));
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sra(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return (
+      vector unsigned __int128)(((vector signed __int128)__a) >>
+                                (__b %
+                                 (vector unsigned __int128)(sizeof(
+                                                                unsigned __int128) *
+                                                            __CHAR_BIT__)));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sra(vector signed __int128 __a, vector unsigned __int128 __b) {
+  return __a >> (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                                  __CHAR_BIT__));
+}
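
A sketch of the quadword shifts, assuming -mcpu=power10; as the bodies above
show, the shift count is reduced modulo 128, mirroring the scalar rule:

    #include <altivec.h>

    vector unsigned __int128 shl_u128(vector unsigned __int128 a,
                                      vector unsigned __int128 n) {
      return vec_sl(a, n);
    }
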
+
 #endif /* __POWER10_VECTOR__ */
 
 #undef __ATTRS_o_ai
diff --git a/darwin-x86/lib64/clang/11.0.5/include/ammintrin.h b/darwin-x86/lib64/clang/12.0.1/include/ammintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/ammintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/ammintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/amxintrin.h b/darwin-x86/lib64/clang/12.0.1/include/amxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/amxintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/amxintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/arm64intr.h b/darwin-x86/lib64/clang/12.0.1/include/arm64intr.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/arm64intr.h
rename to darwin-x86/lib64/clang/12.0.1/include/arm64intr.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/arm_acle.h b/darwin-x86/lib64/clang/12.0.1/include/arm_acle.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/arm_acle.h
rename to darwin-x86/lib64/clang/12.0.1/include/arm_acle.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/arm_bf16.h b/darwin-x86/lib64/clang/12.0.1/include/arm_bf16.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/arm_bf16.h
rename to darwin-x86/lib64/clang/12.0.1/include/arm_bf16.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/arm_cde.h b/darwin-x86/lib64/clang/12.0.1/include/arm_cde.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/arm_cde.h
rename to darwin-x86/lib64/clang/12.0.1/include/arm_cde.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/arm_cmse.h b/darwin-x86/lib64/clang/12.0.1/include/arm_cmse.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/arm_cmse.h
rename to darwin-x86/lib64/clang/12.0.1/include/arm_cmse.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/arm_fp16.h b/darwin-x86/lib64/clang/12.0.1/include/arm_fp16.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/arm_fp16.h
rename to darwin-x86/lib64/clang/12.0.1/include/arm_fp16.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/arm_mve.h b/darwin-x86/lib64/clang/12.0.1/include/arm_mve.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/arm_mve.h
rename to darwin-x86/lib64/clang/12.0.1/include/arm_mve.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/arm_neon.h b/darwin-x86/lib64/clang/12.0.1/include/arm_neon.h
similarity index 99%
rename from darwin-x86/lib64/clang/11.0.5/include/arm_neon.h
rename to darwin-x86/lib64/clang/12.0.1/include/arm_neon.h
index da1e17c..39ef504 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/arm_neon.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/arm_neon.h
@@ -45860,9 +45860,9 @@
   __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
   return __ret;
 }
-__ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1);
+__ai uint64_t vceqd_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqd_s64(__p0, __p1);
   return __ret;
 }
 __ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
@@ -46252,9 +46252,9 @@
   __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
   return __ret;
 }
-__ai int64_t vceqzd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0);
+__ai uint64_t vceqzd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqzd_s64(__p0);
   return __ret;
 }
 __ai uint64_t vceqzd_f64(float64_t __p0) {
@@ -46333,9 +46333,9 @@
   __ret = (uint64x1_t)(__p0 >= __p1);
   return __ret;
 }
-__ai int64_t vcged_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1);
+__ai uint64_t vcged_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcged_s64(__p0, __p1);
   return __ret;
 }
 __ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
@@ -46523,9 +46523,9 @@
 }
 #endif
 
-__ai int64_t vcgezd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0);
+__ai uint64_t vcgezd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgezd_s64(__p0);
   return __ret;
 }
 __ai uint64_t vcgezd_f64(float64_t __p0) {
@@ -46604,9 +46604,9 @@
   __ret = (uint64x1_t)(__p0 > __p1);
   return __ret;
 }
-__ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
+__ai uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
   return __ret;
 }
 __ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
@@ -46794,9 +46794,9 @@
 }
 #endif
 
-__ai int64_t vcgtzd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0);
+__ai uint64_t vcgtzd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtzd_s64(__p0);
   return __ret;
 }
 __ai uint64_t vcgtzd_f64(float64_t __p0) {
@@ -46880,9 +46880,9 @@
   __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
   return __ret;
 }
-__ai int64_t vcled_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1);
+__ai uint64_t vcled_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcled_s64(__p0, __p1);
   return __ret;
 }
 __ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
@@ -47065,9 +47065,9 @@
 }
 #endif
 
-__ai int64_t vclezd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vclezd_s64(__p0);
+__ai uint64_t vclezd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vclezd_s64(__p0);
   return __ret;
 }
 __ai uint64_t vclezd_f64(float64_t __p0) {
@@ -47151,9 +47151,9 @@
   __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
   return __ret;
 }
-__ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1);
+__ai uint64_t vcltd_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltd_s64(__p0, __p1);
   return __ret;
 }
 __ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
@@ -47336,9 +47336,9 @@
 }
 #endif
 
-__ai int64_t vcltzd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0);
+__ai uint64_t vcltzd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltzd_s64(__p0);
   return __ret;
 }
 __ai uint64_t vcltzd_f64(float64_t __p0) {
@@ -57188,30 +57188,30 @@
 }
 #endif
 
-__ai int16_t vqmovuns_s32(int32_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0);
+__ai uint16_t vqmovuns_s32(int32_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vqmovuns_s32(__p0);
   return __ret;
 }
-__ai int32_t vqmovund_s64(int64_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0);
+__ai uint32_t vqmovund_s64(int64_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vqmovund_s64(__p0);
   return __ret;
 }
-__ai int8_t vqmovunh_s16(int16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0);
+__ai uint8_t vqmovunh_s16(int16_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vqmovunh_s16(__p0);
   return __ret;
 }
 #ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
+__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
   uint16x8_t __ret;
   __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
   return __ret;
 }
 #else
-__ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x8_t __ret;
   __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
@@ -57221,14 +57221,14 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
+__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
   uint32x4_t __ret;
   __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
   return __ret;
 }
 #else
-__ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x4_t __ret;
   __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
@@ -57238,14 +57238,14 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
+__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
   uint8x16_t __ret;
   __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
   return __ret;
 }
 #else
-__ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __ret;
   __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
@@ -57549,22 +57549,22 @@
 })
 #endif
 
-__ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) {
+__ai uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) {
   uint8_t __ret;
   __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
   return __ret;
 }
-__ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) {
+__ai uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) {
   uint32_t __ret;
   __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
   return __ret;
 }
-__ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) {
+__ai uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) {
   uint64_t __ret;
   __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
   return __ret;
 }
-__ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) {
+__ai uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) {
   uint16_t __ret;
   __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
   return __ret;
@@ -57832,22 +57832,22 @@
   __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
   __ret; \
 })
-__ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) {
+__ai uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) {
   uint8_t __ret;
   __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
   return __ret;
 }
-__ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) {
+__ai uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) {
   uint32_t __ret;
   __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
   return __ret;
 }
-__ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) {
+__ai uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) {
   uint64_t __ret;
   __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
   return __ret;
 }
-__ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) {
+__ai uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) {
   uint16_t __ret;
   __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
   return __ret;
@@ -59452,7 +59452,7 @@
   __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
   return __ret;
 }
-__ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) {
+__ai uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) {
   uint64_t __ret;
   __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
   return __ret;
@@ -59853,7 +59853,7 @@
   __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \
   __ret; \
 })
-__ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
+__ai uint64_t vshld_u64(uint64_t __p0, int64_t __p1) {
   uint64_t __ret;
   __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
   return __ret;
@@ -62423,9 +62423,9 @@
   __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
   return __ret;
 }
-__ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1);
+__ai uint64_t vtstd_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vtstd_s64(__p0, __p1);
   return __ret;
 }
 __ai int8_t vuqaddb_s8(int8_t __p0, uint8_t __p1) {
diff --git a/darwin-x86/lib64/clang/11.0.5/include/arm_sve.h b/darwin-x86/lib64/clang/12.0.1/include/arm_sve.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/arm_sve.h
rename to darwin-x86/lib64/clang/12.0.1/include/arm_sve.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/armintr.h b/darwin-x86/lib64/clang/12.0.1/include/armintr.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/armintr.h
rename to darwin-x86/lib64/clang/12.0.1/include/armintr.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx2intrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx2intrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx2intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512bf16intrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512bf16intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512bf16intrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512bf16intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512bitalgintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512bitalgintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512bitalgintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512bitalgintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512bwintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512bwintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512bwintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512bwintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512cdintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512cdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512cdintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512cdintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512dqintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512dqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512dqintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512dqintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512erintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512erintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512erintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512erintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512fintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512fintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512fintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512fintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512ifmaintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512ifmaintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512ifmaintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512ifmaintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512ifmavlintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512ifmavlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512ifmavlintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512ifmavlintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512pfintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512pfintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512pfintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512pfintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vbmi2intrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vbmi2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vbmi2intrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vbmi2intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vbmiintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vbmiintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vbmiintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vbmiintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vbmivlintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vbmivlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vbmivlintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vbmivlintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vlbf16intrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vlbf16intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vlbf16intrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vlbf16intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vlbitalgintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vlbitalgintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vlbitalgintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vlbitalgintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vlbwintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vlbwintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vlbwintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vlbwintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vlcdintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vlcdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vlcdintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vlcdintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vldqintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vldqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vldqintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vldqintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vlintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vlintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vlintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vlvbmi2intrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vlvbmi2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vlvbmi2intrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vlvbmi2intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vlvnniintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vlvnniintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vlvnniintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vlvnniintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vlvp2intersectintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vlvp2intersectintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vlvp2intersectintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vlvp2intersectintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vnniintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vnniintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vnniintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vnniintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vp2intersectintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vp2intersectintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vp2intersectintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vp2intersectintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vpopcntdqintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vpopcntdqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vpopcntdqintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vpopcntdqintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avx512vpopcntdqvlintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avx512vpopcntdqvlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avx512vpopcntdqvlintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avx512vpopcntdqvlintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/avxintrin.h b/darwin-x86/lib64/clang/12.0.1/include/avxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/avxintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/avxintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/bits/stdatomic.h b/darwin-x86/lib64/clang/12.0.1/include/bits/stdatomic.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/bits/stdatomic.h
rename to darwin-x86/lib64/clang/12.0.1/include/bits/stdatomic.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/bmi2intrin.h b/darwin-x86/lib64/clang/12.0.1/include/bmi2intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/bmi2intrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/bmi2intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/bmiintrin.h b/darwin-x86/lib64/clang/12.0.1/include/bmiintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/bmiintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/bmiintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/cet.h b/darwin-x86/lib64/clang/12.0.1/include/cet.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/cet.h
rename to darwin-x86/lib64/clang/12.0.1/include/cet.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/cetintrin.h b/darwin-x86/lib64/clang/12.0.1/include/cetintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/cetintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/cetintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/cldemoteintrin.h b/darwin-x86/lib64/clang/12.0.1/include/cldemoteintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/cldemoteintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/cldemoteintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/clflushoptintrin.h b/darwin-x86/lib64/clang/12.0.1/include/clflushoptintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/clflushoptintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/clflushoptintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/clwbintrin.h b/darwin-x86/lib64/clang/12.0.1/include/clwbintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/clwbintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/clwbintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/clzerointrin.h b/darwin-x86/lib64/clang/12.0.1/include/clzerointrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/clzerointrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/clzerointrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/cpuid.h b/darwin-x86/lib64/clang/12.0.1/include/cpuid.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/cpuid.h
rename to darwin-x86/lib64/clang/12.0.1/include/cpuid.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/cuda_wrappers/algorithm b/darwin-x86/lib64/clang/12.0.1/include/cuda_wrappers/algorithm
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/cuda_wrappers/algorithm
rename to darwin-x86/lib64/clang/12.0.1/include/cuda_wrappers/algorithm
diff --git a/darwin-x86/lib64/clang/11.0.5/include/cuda_wrappers/complex b/darwin-x86/lib64/clang/12.0.1/include/cuda_wrappers/complex
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/cuda_wrappers/complex
rename to darwin-x86/lib64/clang/12.0.1/include/cuda_wrappers/complex
diff --git a/darwin-x86/lib64/clang/11.0.5/include/cuda_wrappers/new b/darwin-x86/lib64/clang/12.0.1/include/cuda_wrappers/new
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/cuda_wrappers/new
rename to darwin-x86/lib64/clang/12.0.1/include/cuda_wrappers/new
diff --git a/darwin-x86/lib64/clang/11.0.5/include/emmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/emmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/emmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/emmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/enqcmdintrin.h b/darwin-x86/lib64/clang/12.0.1/include/enqcmdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/enqcmdintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/enqcmdintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/f16cintrin.h b/darwin-x86/lib64/clang/12.0.1/include/f16cintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/f16cintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/f16cintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/float.h b/darwin-x86/lib64/clang/12.0.1/include/float.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/float.h
rename to darwin-x86/lib64/clang/12.0.1/include/float.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/fma4intrin.h b/darwin-x86/lib64/clang/12.0.1/include/fma4intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/fma4intrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/fma4intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/fmaintrin.h b/darwin-x86/lib64/clang/12.0.1/include/fmaintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/fmaintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/fmaintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/fuzzer/FuzzedDataProvider.h b/darwin-x86/lib64/clang/12.0.1/include/fuzzer/FuzzedDataProvider.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/fuzzer/FuzzedDataProvider.h
rename to darwin-x86/lib64/clang/12.0.1/include/fuzzer/FuzzedDataProvider.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/fxsrintrin.h b/darwin-x86/lib64/clang/12.0.1/include/fxsrintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/fxsrintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/fxsrintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/gfniintrin.h b/darwin-x86/lib64/clang/12.0.1/include/gfniintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/gfniintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/gfniintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/htmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/htmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/htmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/htmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/htmxlintrin.h b/darwin-x86/lib64/clang/12.0.1/include/htmxlintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/htmxlintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/htmxlintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/ia32intrin.h b/darwin-x86/lib64/clang/12.0.1/include/ia32intrin.h
similarity index 78%
rename from darwin-x86/lib64/clang/11.0.5/include/ia32intrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/ia32intrin.h
index 79b7f06..00138ef 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/ia32intrin.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/ia32intrin.h
@@ -14,6 +14,18 @@
 #ifndef __IA32INTRIN_H
 #define __IA32INTRIN_H
 
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS_SSE42 __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS_CAST __attribute__((__always_inline__)) constexpr
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS_CAST __attribute__((__always_inline__))
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
 /** Find the first set bit starting from the lsb. Result is undefined if
  *  input is 0.
  *
@@ -26,7 +38,7 @@
  *     A 32-bit integer operand.
  *  \returns A 32-bit integer containing the bit number.
  */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsfd(int __A) {
   return __builtin_ctz(__A);
 }
@@ -43,7 +55,7 @@
  *     A 32-bit integer operand.
  *  \returns A 32-bit integer containing the bit number.
  */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsrd(int __A) {
   return 31 - __builtin_clz(__A);
 }
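 /* A hedged usage sketch (illustrative comment, not part of the header):
  * for __A = 0x50 (binary 01010000), __bsfd returns 4 and __bsrd returns
  * 6, mirroring __builtin_ctz and 31 - __builtin_clz:
  *
  *   int lo = __bsfd(0x50);  // 4 (lowest set bit)
  *   int hi = __bsrd(0x50);  // 6 (highest set bit)
  */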
@@ -59,12 +71,12 @@
  *     A 32-bit integer operand.
  *  \returns A 32-bit integer containing the swapped bytes.
  */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bswapd(int __A) {
   return __builtin_bswap32(__A);
 }
 
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 _bswap(int __A) {
   return __builtin_bswap32(__A);
 }
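 /* Illustrative sketch: byte swapping converts between little- and
  * big-endian representations of the same 32-bit value, e.g.
  *
  *   int v = __bswapd(0x11223344);  // 0x44332211
  */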
@@ -85,7 +97,7 @@
  *     A 64-bit integer operand.
  *  \returns A 32-bit integer containing the bit number.
  */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsfq(long long __A) {
   return __builtin_ctzll(__A);
 }
@@ -102,7 +114,7 @@
  *     A 64-bit integer operand.
  *  \returns A 32-bit integer containing the bit number.
  */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsrq(long long __A) {
   return 63 - __builtin_clzll(__A);
 }
@@ -118,7 +130,7 @@
  *     A 64-bit integer operand.
  *  \returns A 64-bit integer containing the swapped bytes.
  */
-static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
 __bswapq(long long __A) {
   return __builtin_bswap64(__A);
 }
@@ -138,7 +150,7 @@
  *  \returns A 32-bit integer containing the number of bits with value 1 in the
  *     source operand.
  */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __popcntd(unsigned int __A)
 {
   return __builtin_popcount(__A);
@@ -159,7 +171,7 @@
  *  \returns A 64-bit integer containing the number of bits with value 1 in the
  *     source operand.
  */
-static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
 __popcntq(unsigned long long __A)
 {
   return __builtin_popcountll(__A);
@@ -169,26 +181,26 @@
 #endif /* __x86_64__ */
 
 #ifdef __x86_64__
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
 __readeflags(void)
 {
   return __builtin_ia32_readeflags_u64();
 }
 
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __DEFAULT_FN_ATTRS
 __writeeflags(unsigned long long __f)
 {
   __builtin_ia32_writeeflags_u64(__f);
 }
 
 #else /* !__x86_64__ */
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
 __readeflags(void)
 {
   return __builtin_ia32_readeflags_u32();
 }
 
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __DEFAULT_FN_ATTRS
 __writeeflags(unsigned int __f)
 {
   __builtin_ia32_writeeflags_u32(__f);
@@ -205,11 +217,9 @@
  *     A 32-bit float value.
  *  \returns a 32-bit unsigned integer containing the converted value.
  */
-static __inline__ unsigned int __attribute__((__always_inline__))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_CAST
 _castf32_u32(float __A) {
-  unsigned int D;
-  __builtin_memcpy(&D, &__A, sizeof(__A));
-  return D;
+  return __builtin_bit_cast(unsigned int, __A);
 }
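 /* Hedged sketch (assuming IEEE-754 binary32): the cast reinterprets the
  * object representation rather than converting the value, e.g.
  *
  *   unsigned int bits = _castf32_u32(1.0f);  // 0x3F800000
  *
  * and, unlike the previous __builtin_memcpy form, __builtin_bit_cast is
  * usable in C++ constant expressions.
  */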
 
 /** Cast a 64-bit float value to a 64-bit unsigned integer value
@@ -222,11 +232,9 @@
  *     A 64-bit float value.
  *  \returns a 64-bit unsigned integer containing the converted value.
  */
-static __inline__ unsigned long long __attribute__((__always_inline__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CAST
 _castf64_u64(double __A) {
-  unsigned long long D;
-  __builtin_memcpy(&D, &__A, sizeof(__A));
-  return D;
+  return __builtin_bit_cast(unsigned long long, __A);
 }
 
 /** Cast a 32-bit unsigned integer value to a 32-bit float value
@@ -239,11 +247,9 @@
  *     A 32-bit unsigned integer value.
  *  \returns a 32-bit float value containing the converted value.
  */
-static __inline__ float __attribute__((__always_inline__))
+static __inline__ float __DEFAULT_FN_ATTRS_CAST
 _castu32_f32(unsigned int __A) {
-  float D;
-  __builtin_memcpy(&D, &__A, sizeof(__A));
-  return D;
+  return __builtin_bit_cast(float, __A);
 }
 
 /** Cast a 64-bit unsigned integer value to a 64-bit float value
@@ -256,11 +262,9 @@
  *     A 64-bit unsigned integer value.
  *  \returns a 64-bit float value containing the converted value.
  */
-static __inline__ double __attribute__((__always_inline__))
+static __inline__ double __DEFAULT_FN_ATTRS_CAST
 _castu64_f64(unsigned long long __A) {
-  double D;
-  __builtin_memcpy(&D, &__A, sizeof(__A));
-  return D;
+  return __builtin_bit_cast(double, __A);
 }
 
 /** Adds the unsigned integer operand to the CRC-32C checksum of the
@@ -278,7 +282,7 @@
  *  \returns The result of adding operand \a __C to the CRC-32C checksum of
  *     operand \a __D.
  */
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_SSE42
 __crc32b(unsigned int __C, unsigned char __D)
 {
   return __builtin_ia32_crc32qi(__C, __D);
@@ -299,7 +303,7 @@
  *  \returns The result of adding operand \a __C to the CRC-32C checksum of
  *     operand \a __D.
  */
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_SSE42
 __crc32w(unsigned int __C, unsigned short __D)
 {
   return __builtin_ia32_crc32hi(__C, __D);
@@ -320,7 +324,7 @@
  *  \returns The result of adding operand \a __C to the CRC-32C checksum of
  *     operand \a __D.
  */
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_SSE42
 __crc32d(unsigned int __C, unsigned int __D)
 {
   return __builtin_ia32_crc32si(__C, __D);
@@ -342,20 +346,20 @@
  *  \returns The result of adding operand \a __C to the CRC-32C checksum of
  *     operand \a __D.
  */
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS_SSE42
 __crc32q(unsigned long long __C, unsigned long long __D)
 {
   return __builtin_ia32_crc32di(__C, __D);
 }
 #endif /* __x86_64__ */
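 /* Hedged usage sketch (illustrative only; `buf` and `n` are assumed): the
  * CRC-32C intrinsics chain by feeding each result back in as the next
  * checksum input, so a buffer can be folded byte by byte:
  *
  *   unsigned int crc = 0xFFFFFFFFu;
  *   for (size_t i = 0; i < n; ++i)
  *     crc = __crc32b(crc, buf[i]);
  */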
 
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
 __rdpmc(int __A) {
   return __builtin_ia32_rdpmc(__A);
 }
 
 /* __rdtscp */
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
 __rdtscp(unsigned int *__A) {
   return __builtin_ia32_rdtscp(__A);
 }
@@ -364,48 +368,48 @@
 
 #define _rdpmc(A) __rdpmc(A)
 
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __DEFAULT_FN_ATTRS
 _wbinvd(void) {
   __builtin_ia32_wbinvd();
 }
 
-static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned char __DEFAULT_FN_ATTRS_CONSTEXPR
 __rolb(unsigned char __X, int __C) {
   return __builtin_rotateleft8(__X, __C);
 }
 
-static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned char __DEFAULT_FN_ATTRS_CONSTEXPR
 __rorb(unsigned char __X, int __C) {
   return __builtin_rotateright8(__X, __C);
 }
 
-static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned short __DEFAULT_FN_ATTRS_CONSTEXPR
 __rolw(unsigned short __X, int __C) {
   return __builtin_rotateleft16(__X, __C);
 }
 
-static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned short __DEFAULT_FN_ATTRS_CONSTEXPR
 __rorw(unsigned short __X, int __C) {
   return __builtin_rotateright16(__X, __C);
 }
 
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR
 __rold(unsigned int __X, int __C) {
   return __builtin_rotateleft32(__X, __C);
 }
 
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR
 __rord(unsigned int __X, int __C) {
   return __builtin_rotateright32(__X, __C);
 }
 
 #ifdef __x86_64__
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR
 __rolq(unsigned long long __X, int __C) {
   return __builtin_rotateleft64(__X, __C);
 }
 
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR
 __rorq(unsigned long long __X, int __C) {
   return __builtin_rotateright64(__X, __C);
 }
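 /* Illustrative sketch: the rotate helpers wrap shifted-out bits around to
  * the other end, e.g.
  *
  *   unsigned char r = __rolb(0x81, 1);       // 0x03
  *   unsigned int  s = __rord(0x00000001, 4); // 0x10000000
  */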
@@ -429,4 +433,9 @@
 #define _rotwl(a,b) __rolw((a), (b))
 #define _rotwr(a,b) __rorw((a), (b))
 
+#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_CAST
+#undef __DEFAULT_FN_ATTRS_SSE42
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
+
 #endif /* __IA32INTRIN_H */
diff --git a/darwin-x86/lib64/clang/11.0.5/include/immintrin.h b/darwin-x86/lib64/clang/12.0.1/include/immintrin.h
similarity index 98%
rename from darwin-x86/lib64/clang/11.0.5/include/immintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/immintrin.h
index e9dff23..8fb5447 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/immintrin.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/immintrin.h
@@ -472,6 +472,11 @@
 #endif
 
 #if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__KL__) || defined(__WIDEKL__)
+#include <keylockerintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     defined(__AMXTILE__) || defined(__AMXINT8__) || defined(__AMXBF16__)
 #include <amxintrin.h>
 #endif
diff --git a/darwin-x86/lib64/clang/11.0.5/include/intrin.h b/darwin-x86/lib64/clang/12.0.1/include/intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/intrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/inttypes.h b/darwin-x86/lib64/clang/12.0.1/include/inttypes.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/inttypes.h
rename to darwin-x86/lib64/clang/12.0.1/include/inttypes.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/invpcidintrin.h b/darwin-x86/lib64/clang/12.0.1/include/invpcidintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/invpcidintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/invpcidintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/iso646.h b/darwin-x86/lib64/clang/12.0.1/include/iso646.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/iso646.h
rename to darwin-x86/lib64/clang/12.0.1/include/iso646.h
diff --git a/darwin-x86/lib64/clang/12.0.1/include/keylockerintrin.h b/darwin-x86/lib64/clang/12.0.1/include/keylockerintrin.h
new file mode 100644
index 0000000..c15d39c
--- /dev/null
+++ b/darwin-x86/lib64/clang/12.0.1/include/keylockerintrin.h
@@ -0,0 +1,506 @@
+/*===----------------- keylockerintrin.h - KL Intrinsics -------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <keylockerintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _KEYLOCKERINTRIN_H
+#define _KEYLOCKERINTRIN_H
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__KL__)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("kl"),\
+                 __min_vector_width__(128)))
+
+/// Load the internal wrapping key from __intkey, __enkey_lo and __enkey_hi.
+/// __ctl is assigned to EAX, which specifies the KeySource and whether backing
+/// up the key is permitted. The 256-bit encryption key is loaded from the two
+/// explicit operands (__enkey_lo and __enkey_hi). The 128-bit integrity key is
+/// loaded from the implicit operand XMM0, which is assigned from __intkey.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> LOADIWKEY </c> instructions.
+///
+/// \operation
+/// IF CPL > 0 // LOADIWKEY only allowed at ring 0 (supervisor mode)
+///   GP (0)
+/// FI
+/// IF “LOADIWKEY exiting” VM execution control set
+///   VMexit
+/// FI
+/// IF __ctl[4:1] > 1 // Reserved KeySource encoding used
+///   GP (0)
+/// FI
+/// IF __ctl[31:5] != 0 // Reserved bit in __ctl is set
+///   GP (0)
+/// FI
+/// IF __ctl[0] AND (CPUID.19H.ECX[0] == 0) // NoBackup is not supported on this part
+///   GP (0)
+/// FI
+/// IF (__ctl[4:1] == 1) AND (CPUID.19H.ECX[1] == 0) // KeySource of 1 is not supported on this part
+///   GP (0)
+/// FI
+/// IF (__ctl[4:1] == 0) // KeySource of 0.
+///   IWKey.Encryption Key[127:0] := __enkey_hi[127:0]
+///   IWKey.Encryption Key[255:128] := __enkey_lo[127:0]
+///   IWKey.IntegrityKey[127:0] := __intkey[127:0]
+///   IWKey.NoBackup := __ctl[0]
+///   IWKey.KeySource := __ctl[4:1]
+///   ZF := 0
+/// ELSE // KeySource of 1. See RDSEED definition for details of randomness
+///   IF HW_NRND_GEN.ready == 1 // Full-entropy random data from RDSEED was received
+///     IWKey.Encryption Key[127:0] := __enkey_hi[127:0] XOR HW_NRND_GEN.data[127:0]
+///     IWKey.Encryption Key[255:128] := __enkey_lo[127:0] XOR HW_NRND_GEN.data[255:128]
+///     IWKey.Encryption Key[255:0] := __enkey_hi[127:0]:__enkey_lo[127:0] XOR HW_NRND_GEN.data[255:0]
+///     IWKey.IntegrityKey[127:0] := __intkey[127:0] XOR HW_NRND_GEN.data[383:256]
+///     IWKey.NoBackup := __ctl[0]
+///     IWKey.KeySource := __ctl[4:1]
+///     ZF := 0
+///   ELSE // Random data was not returned from RDSEED. IWKey was not loaded
+///     ZF := 1
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_loadiwkey (unsigned int __ctl, __m128i __intkey,
+               __m128i __enkey_lo, __m128i __enkey_hi) {
+  __builtin_ia32_loadiwkey (__intkey, __enkey_lo, __enkey_hi, __ctl);
+}
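+
+/* Hedged usage sketch (illustrative only; `random_m128i` is a hypothetical
+ * helper, not part of this header, and LOADIWKEY requires CPL 0):
+ *
+ *   __m128i ik = random_m128i();    // integrity key, moved in via XMM0
+ *   __m128i lo = random_m128i();    // encryption key bits 255:128
+ *   __m128i hi = random_m128i();    // encryption key bits 127:0
+ *   _mm_loadiwkey(0x1, ik, lo, hi); // __ctl[0] = 1 requests NoBackup
+ */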
+
+/// Wrap a 128-bit AES key from __key into a key handle, store the handle in
+/// ((__m128i*)__h) to ((__m128i*)__h) + 5, and return a 32-bit value.
+/// The explicit source operand __htype specifies handle restrictions.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> ENCODEKEY128 </c> instructions.
+///
+/// \operation
+/// InputKey[127:0] := __key[127:0]
+/// KeyMetadata[2:0] := __htype[2:0]
+/// KeyMetadata[23:3] := 0 // Reserved for future usage
+/// KeyMetadata[27:24] := 0 // KeyType is AES-128 (value of 0)
+/// KeyMetadata[127:28] := 0 // Reserved for future usage
+/// Handle[383:0] := WrapKey128(InputKey[127:0], KeyMetadata[127:0],
+///                  IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0])
+/// dst[0] := IWKey.NoBackup
+/// dst[4:1] := IWKey.KeySource[3:0]
+/// dst[31:5] := 0
+/// MEM[__h+127:__h] := Handle[127:0]   // AAD
+/// MEM[__h+255:__h+128] := Handle[255:128] // Integrity Tag
+/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText
+/// MEM[__h+511:__h+384] := 0 // Reserved for future usage
+/// MEM[__h+639:__h+512] := 0 // Reserved for future usage
+/// MEM[__h+767:__h+640] := 0 // Reserved for future usage
+/// OF := 0
+/// SF := 0
+/// ZF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) {
+  return __builtin_ia32_encodekey128_u32(__htype, (__v2di)__key, __h);
+}
+
+/// Wrap a 256-bit AES key from __key_hi:__key_lo into a key handle, store
+/// the handle in ((__m128i*)__h) to ((__m128i*)__h) + 6, and return a
+/// 32-bit value.
+/// The explicit source operand __htype specifies handle restrictions.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> ENCODEKEY256 </c> instructions.
+///
+/// \operation
+/// InputKey[127:0] := __key_lo[127:0]
+/// InputKey[255:128] := __key_hi[127:0]
+/// KeyMetadata[2:0] := __htype[2:0]
+/// KeyMetadata[23:3] := 0 // Reserved for future usage
+/// KeyMetadata[27:24] := 1 // KeyType is AES-256 (value of 1)
+/// KeyMetadata[127:28] := 0 // Reserved for future usage
+/// Handle[511:0] := WrapKey256(InputKey[255:0], KeyMetadata[127:0],
+///                  IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0])
+/// dst[0] := IWKey.NoBackup
+/// dst[4:1] := IWKey.KeySource[3:0]
+/// dst[31:5] := 0
+/// MEM[__h+127:__h]   := Handle[127:0] // AAD
+/// MEM[__h+255:__h+128] := Handle[255:128] // Tag
+/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText[127:0]
+/// MEM[__h+511:__h+384] := Handle[511:384] // CipherText[255:128]
+/// MEM[__h+639:__h+512] := 0 // Reserved for future usage
+/// MEM[__h+767:__h+640] := 0 // Reserved for future usage
+/// MEM[__h+895:__h+768] := 0 // Reserved for future usage
+/// OF := 0
+/// SF := 0
+/// ZF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi,
+                     void *__h) {
+  return __builtin_ia32_encodekey256_u32(__htype, (__v2di)__key_lo,
+                                         (__v2di)__key_hi, __h);
+}
+
+/// The AESENC128KL instruction performs 10 rounds of AES to encrypt __idata
+/// using the 128-bit key in the handle at __h, stores the result in __odata,
+/// and returns the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESENC128KL </c> instructions.
+///
+/// \operation
+/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
+/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[383:256] ||
+///                    HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
+///   IF (Authentic == 0)
+///     ZF := 1
+///   ELSE
+///     MEM[__odata+127:__odata] := AES128Encrypt (__idata[127:0], UnwrappedKey)
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
+  return __builtin_ia32_aesenc128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
+}
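+
+/* Hedged end-to-end sketch (illustrative only; `raw_key` and `pt` are
+ * assumed __m128i values): wrap a raw key into a handle, then encrypt a
+ * block through it; a nonzero return means the handle was rejected.
+ *
+ *   __m128i handle[6];  // 384-bit handle plus reserved space
+ *   unsigned int info = _mm_encodekey128_u32(0, raw_key, handle);
+ *   __m128i ct;
+ *   unsigned char fail = _mm_aesenc128kl_u8(&ct, pt, handle);
+ */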
+
+/// The AESENC256KL instruction performs 14 rounds of AES to encrypt __idata
+/// using the 256-bit key in the handle at __h, stores the result in __odata,
+/// and returns the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESENC256KL </c> instructions.
+///
+/// \operation
+/// Handle[511:0] := MEM[__h+511:__h] // Load is not guaranteed to be atomic.
+/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[255:128] ||
+///                    HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
+///   IF (Authentic == 0)
+///     ZF := 1
+///   ELSE
+///     MEM[__odata+127:__odata] := AES256Encrypt (__idata[127:0], UnwrappedKey)
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
+  return __builtin_ia32_aesenc256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
+}
+
+/// The AESDEC128KL instruction performs 10 rounds of AES to decrypt __idata
+/// using the 128-bit key in the handle at __h, stores the result in __odata,
+/// and returns the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESDEC128KL </c> instructions.
+///
+/// \operation
+/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
+/// IllegalHandle := (HandleReservedBitSet (Handle[383:0]) ||
+///                  (Handle[127:0] AND (CPL > 0)) ||
+///                  Handle[383:256] ||
+///                  HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128)
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
+///   IF (Authentic == 0)
+///     ZF := 1
+///   ELSE
+///     MEM[__odata+127:__odata] := AES128Decrypt (__idata[127:0], UnwrappedKey)
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
+  return __builtin_ia32_aesdec128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
+}
+
+/// The AESDEC256KL instruction performs 14 rounds of AES to decrypt __idata
+/// using the 256-bit key in the handle at __h, stores the result in __odata,
+/// and returns the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESDEC256KL </c> instructions.
+///
+/// \operation
+/// Handle[511:0] := MEM[__h+511:__h]
+/// IllegalHandle := (HandleReservedBitSet (Handle[511:0]) ||
+///                   (Handle[127:0] AND (CPL > 0)) ||
+///                   Handle[383:256] ||
+///                   HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256)
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
+///   IF (Authentic == 0)
+///     ZF := 1
+///   ELSE
+///     MEM[__odata+127:__odata] := AES256Decrypt (__idata[127:0], UnwrappedKey)
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
+  return __builtin_ia32_aesdec256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \
+          || defined(__KL__) */
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__WIDEKL__)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("kl,widekl"),\
+                 __min_vector_width__(128)))
+
+/// Encrypt __idata[0] to __idata[7] using the 128-bit AES key indicated by
+/// the handle at __h, store each resultant block back to __odata[0] through
+/// __odata[7], and return the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESENCWIDE128KL </c> instructions.
+///
+/// \operation
+/// Handle[383:0] := MEM[__h+383:__h]
+/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[255:128] ||
+///                    HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
+///   IF Authentic == 0
+///     ZF := 1
+///   ELSE
+///     FOR i := 0 to 7
+///       __odata[i] := AES128Encrypt (__idata[i], UnwrappedKey)
+///     ENDFOR
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
+  return __builtin_ia32_aesencwide128kl_u8((__v2di *)__odata,
+                                           (const __v2di *)__idata, __h);
+}
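+
+/* Hedged usage sketch (illustrative only; `in` and `handle` are assumed to
+ * be a prepared 8-block input array and an AES-128 key handle): the wide
+ * form encrypts eight independent blocks per invocation.
+ *
+ *   __m128i out[8];
+ *   unsigned char fail = _mm_aesencwide128kl_u8(out, in, handle);
+ */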
+
+/// Encrypt __idata[0] to __idata[7] using the 256-bit AES key indicated by
+/// the handle at __h, store each resultant block back to __odata[0] through
+/// __odata[7], and return the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESENCWIDE256KL </c> instructions.
+///
+/// \operation
+/// Handle[511:0] := MEM[__h+511:__h]
+/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[255:128] ||
+///                    HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
+///   IF Authentic == 0
+///     ZF := 1
+///   ELSE
+///     FOR i := 0 to 7
+///       __odata[i] := AES256Encrypt (__idata[i], UnwrappedKey)
+///     ENDFOR
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
+  return __builtin_ia32_aesencwide256kl_u8((__v2di *)__odata,
+                                           (const __v2di *)__idata, __h);
+}
+
+/// Decrypt __idata[0] to __idata[7] using the 128-bit AES key indicated by
+/// the handle at __h, store each resultant block back to __odata[0] through
+/// __odata[7], and return the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESDECWIDE128KL </c> instructions.
+///
+/// \operation
+/// Handle[383:0] := MEM[__h+383:__h]
+/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[255:128] ||
+///                    HandleKeyType (Handle) != HANDLE_KEY_TYPE_AES128 )
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
+///   IF Authentic == 0
+///     ZF := 1
+///   ELSE
+///     FOR i := 0 to 7
+///       __odata[i] := AES128Decrypt (__idata[i], UnwrappedKey)
+///     ENDFOR
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
+  return __builtin_ia32_aesdecwide128kl_u8((__v2di *)__odata,
+                                           (const __v2di *)__idata, __h);
+}
+
+/// Decrypt __idata[0] to __idata[7] using the 256-bit AES key indicated by
+/// the handle at __h, store each resultant block back to __odata[0] through
+/// __odata[7], and return the affected ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESDECWIDE256KL </c> instructions.
+///
+/// \operation
+/// Handle[511:0] := MEM[__h+511:__h]
+/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
+///                   (Handle[127:0] AND (CPL > 0)) ||
+///                   Handle[255:128] ||
+///                   HandleKeyType (Handle) != HANDLE_KEY_TYPE_AES256 )
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
+///   IF Authentic == 0
+///     ZF := 1
+///   ELSE
+///     FOR i := 0 to 7
+///       __odata[i] := AES256Decrypt (__idata[i], UnwrappedKey)
+///     ENDFOR
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
+  return __builtin_ia32_aesdecwide256kl_u8((__v2di *)__odata,
+                                           (const __v2di *)__idata, __h);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \
+          || defined(__WIDEKL__) */
+
+#endif /* _KEYLOCKERINTRIN_H */
diff --git a/darwin-x86/lib64/clang/11.0.5/include/limits.h b/darwin-x86/lib64/clang/12.0.1/include/limits.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/limits.h
rename to darwin-x86/lib64/clang/12.0.1/include/limits.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/lwpintrin.h b/darwin-x86/lib64/clang/12.0.1/include/lwpintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/lwpintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/lwpintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/lzcntintrin.h b/darwin-x86/lib64/clang/12.0.1/include/lzcntintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/lzcntintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/lzcntintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/mm3dnow.h b/darwin-x86/lib64/clang/12.0.1/include/mm3dnow.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/mm3dnow.h
rename to darwin-x86/lib64/clang/12.0.1/include/mm3dnow.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/mm_malloc.h b/darwin-x86/lib64/clang/12.0.1/include/mm_malloc.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/mm_malloc.h
rename to darwin-x86/lib64/clang/12.0.1/include/mm_malloc.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/mmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/mmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/mmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/mmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/module.modulemap b/darwin-x86/lib64/clang/12.0.1/include/module.modulemap
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/module.modulemap
rename to darwin-x86/lib64/clang/12.0.1/include/module.modulemap
diff --git a/darwin-x86/lib64/clang/11.0.5/include/movdirintrin.h b/darwin-x86/lib64/clang/12.0.1/include/movdirintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/movdirintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/movdirintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/msa.h b/darwin-x86/lib64/clang/12.0.1/include/msa.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/msa.h
rename to darwin-x86/lib64/clang/12.0.1/include/msa.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/mwaitxintrin.h b/darwin-x86/lib64/clang/12.0.1/include/mwaitxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/mwaitxintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/mwaitxintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/nmmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/nmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/nmmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/nmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/omp-tools.h b/darwin-x86/lib64/clang/12.0.1/include/omp-tools.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/omp-tools.h
rename to darwin-x86/lib64/clang/12.0.1/include/omp-tools.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/omp.h b/darwin-x86/lib64/clang/12.0.1/include/omp.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/omp.h
rename to darwin-x86/lib64/clang/12.0.1/include/omp.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/ompt.h b/darwin-x86/lib64/clang/12.0.1/include/ompt.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/ompt.h
rename to darwin-x86/lib64/clang/12.0.1/include/ompt.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/opencl-c-base.h b/darwin-x86/lib64/clang/12.0.1/include/opencl-c-base.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/opencl-c-base.h
rename to darwin-x86/lib64/clang/12.0.1/include/opencl-c-base.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/opencl-c.h b/darwin-x86/lib64/clang/12.0.1/include/opencl-c.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/opencl-c.h
rename to darwin-x86/lib64/clang/12.0.1/include/opencl-c.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/__clang_openmp_device_functions.h b/darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/__clang_openmp_device_functions.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/__clang_openmp_device_functions.h
rename to darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/__clang_openmp_device_functions.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/cmath b/darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/cmath
similarity index 95%
rename from darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/cmath
rename to darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/cmath
index bd6011e..1aff66a 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/cmath
+++ b/darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/cmath
@@ -24,8 +24,11 @@
 // which might live in cstdlib.
 #include <cstdlib>
 
+// We need limits because __clang_cuda_cmath.h below uses `std::numeric_limits`.
+#include <limits>
+
 #pragma omp begin declare variant match(                                       \
-    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any, allow_templates)})
 
 #define __CUDA__
 #define __OPENMP_NVPTX__
diff --git a/darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex b/darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex
new file mode 100644
index 0000000..306ffe2
--- /dev/null
+++ b/darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex
@@ -0,0 +1,50 @@
+/*===-- complex --- OpenMP complex wrapper for target regions --------- c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_COMPLEX__
+#define __CLANG_OPENMP_COMPLEX__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+// We require std::math functions in the complex builtins below.
+#include <cmath>
+
+#define __CUDA__
+#include <__clang_cuda_complex_builtins.h>
+#endif
+
+// Grab the host header too.
+#include_next <complex>
+
+
+#ifdef __cplusplus
+
+// If we are compiling against libc++, the macro _LIBCPP_STD_VER should be set
+// after including <cmath> above. Since the complex header we use is a
+// simplified version of the libc++ one, we don't need it in this case. If we
+// compile against libstdc++, or any other standard library, we will overload
+// the (hopefully templated) functions in the <complex> header with the ones
+// we got from libc++, which decompose math functions like `std::sin` into
+// arithmetic and calls to non-complex functions, all of which we can then
+// handle.
+#ifndef _LIBCPP_STD_VER
+
+#pragma omp begin declare variant match(                                       \
+    device = {arch(nvptx, nvptx64)},                                           \
+    implementation = {extension(match_any, allow_templates)})
+
+#include <complex_cmath.h>
+
+#pragma omp end declare variant
+
+#endif
+
+#endif
diff --git a/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex.h b/darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex.h
rename to darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex.h
diff --git a/darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex_cmath.h b/darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex_cmath.h
new file mode 100644
index 0000000..e3d9aeb
--- /dev/null
+++ b/darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex_cmath.h
@@ -0,0 +1,388 @@
+//===------------------------- __complex_cmath.h --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// std::complex header copied from the libcxx source and simplified for use in
+// OpenMP target offload regions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#ifndef __cplusplus
+#error "This file is for C++ compilation only."
+#endif
+
+#ifndef _LIBCPP_COMPLEX
+#define _LIBCPP_COMPLEX
+
+#include <cmath>
+#include <type_traits>
+
+#define __DEVICE__ static constexpr __attribute__((nothrow))
+
+namespace std {
+
+// abs
+
+template <class _Tp> __DEVICE__ _Tp abs(const std::complex<_Tp> &__c) {
+  return hypot(__c.real(), __c.imag());
+}
+
+// arg
+
+template <class _Tp> __DEVICE__ _Tp arg(const std::complex<_Tp> &__c) {
+  return atan2(__c.imag(), __c.real());
+}
+
+template <class _Tp>
+typename enable_if<is_integral<_Tp>::value || is_same<_Tp, double>::value,
+                   double>::type
+arg(_Tp __re) {
+  return atan2(0., __re);
+}
+
+template <class _Tp>
+typename enable_if<is_same<_Tp, float>::value, float>::type arg(_Tp __re) {
+  return atan2f(0.F, __re);
+}
+
+// norm
+
+template <class _Tp> __DEVICE__ _Tp norm(const std::complex<_Tp> &__c) {
+  if (std::isinf(__c.real()))
+    return abs(__c.real());
+  if (std::isinf(__c.imag()))
+    return abs(__c.imag());
+  return __c.real() * __c.real() + __c.imag() * __c.imag();
+}
+
+// conj
+
+template <class _Tp> std::complex<_Tp> conj(const std::complex<_Tp> &__c) {
+  return std::complex<_Tp>(__c.real(), -__c.imag());
+}
+
+// proj
+
+template <class _Tp> std::complex<_Tp> proj(const std::complex<_Tp> &__c) {
+  std::complex<_Tp> __r = __c;
+  if (std::isinf(__c.real()) || std::isinf(__c.imag()))
+    __r = std::complex<_Tp>(INFINITY, copysign(_Tp(0), __c.imag()));
+  return __r;
+}
+
+// polar
+
+template <class _Tp>
+complex<_Tp> polar(const _Tp &__rho, const _Tp &__theta = _Tp()) {
+  if (std::isnan(__rho) || signbit(__rho))
+    return std::complex<_Tp>(_Tp(NAN), _Tp(NAN));
+  if (std::isnan(__theta)) {
+    if (std::isinf(__rho))
+      return std::complex<_Tp>(__rho, __theta);
+    return std::complex<_Tp>(__theta, __theta);
+  }
+  if (std::isinf(__theta)) {
+    if (std::isinf(__rho))
+      return std::complex<_Tp>(__rho, _Tp(NAN));
+    return std::complex<_Tp>(_Tp(NAN), _Tp(NAN));
+  }
+  _Tp __x = __rho * cos(__theta);
+  if (std::isnan(__x))
+    __x = 0;
+  _Tp __y = __rho * sin(__theta);
+  if (std::isnan(__y))
+    __y = 0;
+  return std::complex<_Tp>(__x, __y);
+}
+
+// log
+
+template <class _Tp> std::complex<_Tp> log(const std::complex<_Tp> &__x) {
+  return std::complex<_Tp>(log(abs(__x)), arg(__x));
+}
+
+// log10
+
+template <class _Tp> std::complex<_Tp> log10(const std::complex<_Tp> &__x) {
+  return log(__x) / log(_Tp(10));
+}
+
+// sqrt
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> sqrt(const std::complex<_Tp> &__x) {
+  if (std::isinf(__x.imag()))
+    return std::complex<_Tp>(_Tp(INFINITY), __x.imag());
+  if (std::isinf(__x.real())) {
+    if (__x.real() > _Tp(0))
+      return std::complex<_Tp>(__x.real(), std::isnan(__x.imag())
+                                               ? __x.imag()
+                                               : copysign(_Tp(0), __x.imag()));
+    return std::complex<_Tp>(std::isnan(__x.imag()) ? __x.imag() : _Tp(0),
+                             copysign(__x.real(), __x.imag()));
+  }
+  return polar(sqrt(abs(__x)), arg(__x) / _Tp(2));
+}
+
+// exp
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> exp(const std::complex<_Tp> &__x) {
+  _Tp __i = __x.imag();
+  if (std::isinf(__x.real())) {
+    if (__x.real() < _Tp(0)) {
+      if (!std::isfinite(__i))
+        __i = _Tp(1);
+    } else if (__i == 0 || !std::isfinite(__i)) {
+      if (std::isinf(__i))
+        __i = _Tp(NAN);
+      return std::complex<_Tp>(__x.real(), __i);
+    }
+  } else if (std::isnan(__x.real()) && __x.imag() == 0)
+    return __x;
+  _Tp __e = exp(__x.real());
+  return std::complex<_Tp>(__e * cos(__i), __e * sin(__i));
+}
+
+// pow
+
+template <class _Tp>
+std::complex<_Tp> pow(const std::complex<_Tp> &__x,
+                      const std::complex<_Tp> &__y) {
+  return exp(__y * log(__x));
+}
+
+// __sqr, computes pow(x, 2); the real part is factored as
+// (a - b) * (a + b) = a*a - b*b.
+
+template <class _Tp> std::complex<_Tp> __sqr(const std::complex<_Tp> &__x) {
+  return std::complex<_Tp>((__x.real() - __x.imag()) *
+                               (__x.real() + __x.imag()),
+                           _Tp(2) * __x.real() * __x.imag());
+}
+
+// asinh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> asinh(const std::complex<_Tp> &__x) {
+  const _Tp __pi(atan2(+0., -0.));
+  if (std::isinf(__x.real())) {
+    if (std::isnan(__x.imag()))
+      return __x;
+    if (std::isinf(__x.imag()))
+      return std::complex<_Tp>(__x.real(),
+                               copysign(__pi * _Tp(0.25), __x.imag()));
+    return std::complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag()));
+  }
+  if (std::isnan(__x.real())) {
+    if (std::isinf(__x.imag()))
+      return std::complex<_Tp>(__x.imag(), __x.real());
+    if (__x.imag() == 0)
+      return __x;
+    return std::complex<_Tp>(__x.real(), __x.real());
+  }
+  if (std::isinf(__x.imag()))
+    return std::complex<_Tp>(copysign(__x.imag(), __x.real()),
+                             copysign(__pi / _Tp(2), __x.imag()));
+  std::complex<_Tp> __z = log(__x + sqrt(__sqr(__x) + _Tp(1)));
+  return std::complex<_Tp>(copysign(__z.real(), __x.real()),
+                           copysign(__z.imag(), __x.imag()));
+}
+
+// acosh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> acosh(const std::complex<_Tp> &__x) {
+  const _Tp __pi(atan2(+0., -0.));
+  if (std::isinf(__x.real())) {
+    if (std::isnan(__x.imag()))
+      return std::complex<_Tp>(abs(__x.real()), __x.imag());
+    if (std::isinf(__x.imag())) {
+      if (__x.real() > 0)
+        return std::complex<_Tp>(__x.real(),
+                                 copysign(__pi * _Tp(0.25), __x.imag()));
+      else
+        return std::complex<_Tp>(-__x.real(),
+                                 copysign(__pi * _Tp(0.75), __x.imag()));
+    }
+    if (__x.real() < 0)
+      return std::complex<_Tp>(-__x.real(), copysign(__pi, __x.imag()));
+    return std::complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag()));
+  }
+  if (std::isnan(__x.real())) {
+    if (std::isinf(__x.imag()))
+      return std::complex<_Tp>(abs(__x.imag()), __x.real());
+    return std::complex<_Tp>(__x.real(), __x.real());
+  }
+  if (std::isinf(__x.imag()))
+    return std::complex<_Tp>(abs(__x.imag()),
+                             copysign(__pi / _Tp(2), __x.imag()));
+  std::complex<_Tp> __z = log(__x + sqrt(__sqr(__x) - _Tp(1)));
+  return std::complex<_Tp>(copysign(__z.real(), _Tp(0)),
+                           copysign(__z.imag(), __x.imag()));
+}
+
+// atanh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> atanh(const std::complex<_Tp> &__x) {
+  const _Tp __pi(atan2(+0., -0.));
+  if (std::isinf(__x.imag())) {
+    return std::complex<_Tp>(copysign(_Tp(0), __x.real()),
+                             copysign(__pi / _Tp(2), __x.imag()));
+  }
+  if (std::isnan(__x.imag())) {
+    if (std::isinf(__x.real()) || __x.real() == 0)
+      return std::complex<_Tp>(copysign(_Tp(0), __x.real()), __x.imag());
+    return std::complex<_Tp>(__x.imag(), __x.imag());
+  }
+  if (std::isnan(__x.real())) {
+    return std::complex<_Tp>(__x.real(), __x.real());
+  }
+  if (std::isinf(__x.real())) {
+    return std::complex<_Tp>(copysign(_Tp(0), __x.real()),
+                             copysign(__pi / _Tp(2), __x.imag()));
+  }
+  if (abs(__x.real()) == _Tp(1) && __x.imag() == _Tp(0)) {
+    return std::complex<_Tp>(copysign(_Tp(INFINITY), __x.real()),
+                             copysign(_Tp(0), __x.imag()));
+  }
+  std::complex<_Tp> __z = log((_Tp(1) + __x) / (_Tp(1) - __x)) / _Tp(2);
+  return std::complex<_Tp>(copysign(__z.real(), __x.real()),
+                           copysign(__z.imag(), __x.imag()));
+}
+
+// sinh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> sinh(const std::complex<_Tp> &__x) {
+  if (std::isinf(__x.real()) && !std::isfinite(__x.imag()))
+    return std::complex<_Tp>(__x.real(), _Tp(NAN));
+  if (__x.real() == 0 && !std::isfinite(__x.imag()))
+    return std::complex<_Tp>(__x.real(), _Tp(NAN));
+  if (__x.imag() == 0 && !std::isfinite(__x.real()))
+    return __x;
+  return std::complex<_Tp>(sinh(__x.real()) * cos(__x.imag()),
+                           cosh(__x.real()) * sin(__x.imag()));
+}
+
+// cosh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> cosh(const std::complex<_Tp> &__x) {
+  if (std::isinf(__x.real()) && !std::isfinite(__x.imag()))
+    return std::complex<_Tp>(abs(__x.real()), _Tp(NAN));
+  if (__x.real() == 0 && !std::isfinite(__x.imag()))
+    return std::complex<_Tp>(_Tp(NAN), __x.real());
+  if (__x.real() == 0 && __x.imag() == 0)
+    return std::complex<_Tp>(_Tp(1), __x.imag());
+  if (__x.imag() == 0 && !std::isfinite(__x.real()))
+    return std::complex<_Tp>(abs(__x.real()), __x.imag());
+  return std::complex<_Tp>(cosh(__x.real()) * cos(__x.imag()),
+                           sinh(__x.real()) * sin(__x.imag()));
+}
+
+// tanh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> tanh(const std::complex<_Tp> &__x) {
+  if (std::isinf(__x.real())) {
+    if (!std::isfinite(__x.imag()))
+      return std::complex<_Tp>(_Tp(1), _Tp(0));
+    return std::complex<_Tp>(_Tp(1),
+                             copysign(_Tp(0), sin(_Tp(2) * __x.imag())));
+  }
+  if (std::isnan(__x.real()) && __x.imag() == 0)
+    return __x;
+  _Tp __2r(_Tp(2) * __x.real());
+  _Tp __2i(_Tp(2) * __x.imag());
+  _Tp __d(cosh(__2r) + cos(__2i));
+  _Tp __2rsh(sinh(__2r));
+  if (std::isinf(__2rsh) && std::isinf(__d))
+    return std::complex<_Tp>(__2rsh > _Tp(0) ? _Tp(1) : _Tp(-1),
+                             __2i > _Tp(0) ? _Tp(0) : _Tp(-0.));
+  return std::complex<_Tp>(__2rsh / __d, sin(__2i) / __d);
+}
+
+// asin
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> asin(const std::complex<_Tp> &__x) {
+  std::complex<_Tp> __z = asinh(complex<_Tp>(-__x.imag(), __x.real()));
+  return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+// acos
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> acos(const std::complex<_Tp> &__x) {
+  const _Tp __pi(atan2(+0., -0.));
+  if (std::isinf(__x.real())) {
+    if (std::isnan(__x.imag()))
+      return std::complex<_Tp>(__x.imag(), __x.real());
+    if (std::isinf(__x.imag())) {
+      if (__x.real() < _Tp(0))
+        return std::complex<_Tp>(_Tp(0.75) * __pi, -__x.imag());
+      return std::complex<_Tp>(_Tp(0.25) * __pi, -__x.imag());
+    }
+    if (__x.real() < _Tp(0))
+      return std::complex<_Tp>(__pi,
+                               signbit(__x.imag()) ? -__x.real() : __x.real());
+    return std::complex<_Tp>(_Tp(0),
+                             signbit(__x.imag()) ? __x.real() : -__x.real());
+  }
+  if (std::isnan(__x.real())) {
+    if (std::isinf(__x.imag()))
+      return std::complex<_Tp>(__x.real(), -__x.imag());
+    return std::complex<_Tp>(__x.real(), __x.real());
+  }
+  if (std::isinf(__x.imag()))
+    return std::complex<_Tp>(__pi / _Tp(2), -__x.imag());
+  if (__x.real() == 0 && (__x.imag() == 0 || isnan(__x.imag())))
+    return std::complex<_Tp>(__pi / _Tp(2), -__x.imag());
+  std::complex<_Tp> __z = log(__x + sqrt(__sqr(__x) - _Tp(1)));
+  if (signbit(__x.imag()))
+    return std::complex<_Tp>(abs(__z.imag()), abs(__z.real()));
+  return std::complex<_Tp>(abs(__z.imag()), -abs(__z.real()));
+}
+
+// atan
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> atan(const std::complex<_Tp> &__x) {
+  std::complex<_Tp> __z = atanh(complex<_Tp>(-__x.imag(), __x.real()));
+  return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+// sin
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> sin(const std::complex<_Tp> &__x) {
+  std::complex<_Tp> __z = sinh(complex<_Tp>(-__x.imag(), __x.real()));
+  return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+// cos
+
+template <class _Tp> std::complex<_Tp> cos(const std::complex<_Tp> &__x) {
+  return cosh(complex<_Tp>(-__x.imag(), __x.real()));
+}
+
+// tan
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> tan(const std::complex<_Tp> &__x) {
+  std::complex<_Tp> __z = tanh(complex<_Tp>(-__x.imag(), __x.real()));
+  return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+} // namespace std
+
+#endif
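
For reference, a toy sketch of the `begin declare variant` extension this header
relies on; the function name is invented and an nvptx offload target is assumed.
Within the range, a definition with the same name as an existing function is
registered as its device variant, and allow_templates extends that to templates,
which is how the std:: templates above take effect on the device only.

    #include <cstdio>

    int where_am_i() { return 0; } // base definition: host fallback

    #pragma omp begin declare variant match(                                   \
        device = {arch(nvptx, nvptx64)},                                       \
        implementation = {extension(match_any, allow_templates)})
    int where_am_i() { return 1; } // device variant, used on nvptx only
    #pragma omp end declare variant

    int main() {
      int w = -1;
    #pragma omp target map(from : w)
      w = where_am_i();
      std::printf("%d\n", w); // 1 when offloaded, 0 on the host fallback
      return 0;
    }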
diff --git a/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/math.h b/darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/math.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/math.h
rename to darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/math.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/new b/darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/new
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/new
rename to darwin-x86/lib64/clang/12.0.1/include/openmp_wrappers/new
diff --git a/darwin-x86/lib64/clang/11.0.5/include/pconfigintrin.h b/darwin-x86/lib64/clang/12.0.1/include/pconfigintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/pconfigintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/pconfigintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/pkuintrin.h b/darwin-x86/lib64/clang/12.0.1/include/pkuintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/pkuintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/pkuintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/pmmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/pmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/pmmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/pmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/popcntintrin.h b/darwin-x86/lib64/clang/12.0.1/include/popcntintrin.h
similarity index 81%
rename from darwin-x86/lib64/clang/11.0.5/include/popcntintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/popcntintrin.h
index 3129010..0aa94ae 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/popcntintrin.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/popcntintrin.h
@@ -13,6 +13,12 @@
 /* Define the default attributes for the functions in this file. */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt")))
 
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
 /// Counts the number of bits in the source operand having a value of 1.
 ///
 /// \headerfile <x86intrin.h>
@@ -23,7 +29,7 @@
 ///    An unsigned 32-bit integer operand.
 /// \returns A 32-bit integer containing the number of bits with value 1 in the
 ///    source operand.
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 _mm_popcnt_u32(unsigned int __A)
 {
   return __builtin_popcount(__A);
@@ -40,7 +46,7 @@
 ///    An unsigned 64-bit integer operand.
 /// \returns A 64-bit integer containing the number of bits with value 1 in the
 ///    source operand.
-static __inline__ long long __DEFAULT_FN_ATTRS
+static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
 _mm_popcnt_u64(unsigned long long __A)
 {
   return __builtin_popcountll(__A);
@@ -48,5 +54,6 @@
 #endif /* __x86_64__ */
 
 #undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
 
 #endif /* __POPCNTINTRIN_H */
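
A hedged sketch of what the new constexpr attribute enables: in C++11 or later
(built with -mpopcnt), the popcount intrinsics can now appear in constant
expressions.

    #include <immintrin.h> // pulls in popcntintrin.h when popcnt is enabled

    // Compiles only because _mm_popcnt_u32 is constexpr under C++11+.
    static_assert(_mm_popcnt_u32(0xF0u) == 4, "0xF0 has four set bits");

    int popcnt_at_runtime(unsigned v) { return _mm_popcnt_u32(v); }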
diff --git a/darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/emmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/ppc_wrappers/emmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/emmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/ppc_wrappers/emmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/mm_malloc.h b/darwin-x86/lib64/clang/12.0.1/include/ppc_wrappers/mm_malloc.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/mm_malloc.h
rename to darwin-x86/lib64/clang/12.0.1/include/ppc_wrappers/mm_malloc.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/mmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/ppc_wrappers/mmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/mmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/ppc_wrappers/mmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/pmmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/ppc_wrappers/pmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/pmmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/ppc_wrappers/pmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/smmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/ppc_wrappers/smmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/smmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/ppc_wrappers/smmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/tmmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/ppc_wrappers/tmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/tmmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/ppc_wrappers/tmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/xmmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/ppc_wrappers/xmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/ppc_wrappers/xmmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/ppc_wrappers/xmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/prfchwintrin.h b/darwin-x86/lib64/clang/12.0.1/include/prfchwintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/prfchwintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/prfchwintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/profile/InstrProfData.inc b/darwin-x86/lib64/clang/12.0.1/include/profile/InstrProfData.inc
similarity index 88%
rename from darwin-x86/lib64/clang/11.0.5/include/profile/InstrProfData.inc
rename to darwin-x86/lib64/clang/12.0.1/include/profile/InstrProfData.inc
index a691352..6d0ffb1 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/profile/InstrProfData.inc
+++ b/darwin-x86/lib64/clang/12.0.1/include/profile/InstrProfData.inc
@@ -157,6 +157,8 @@
 #ifndef VALUE_RANGE_PROF
 VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
 #else /* VALUE_RANGE_PROF */
+/* FIXME: This is to be removed after switching to the new memop value
+ * profiling. */
 VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx)) \
                       INSTR_PROF_COMMA
 VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeStart, Type::getInt64Ty(Ctx)) \
@@ -753,9 +755,14 @@
 #define INSTR_PROF_VALUE_PROF_FUNC __llvm_profile_instrument_target
 #define INSTR_PROF_VALUE_PROF_FUNC_STR \
         INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_FUNC)
+/* FIXME: This is to be removed after switching to the new memop value
+ * profiling. */
 #define INSTR_PROF_VALUE_RANGE_PROF_FUNC __llvm_profile_instrument_range
 #define INSTR_PROF_VALUE_RANGE_PROF_FUNC_STR \
         INSTR_PROF_QUOTE(INSTR_PROF_VALUE_RANGE_PROF_FUNC)
+#define INSTR_PROF_VALUE_PROF_MEMOP_FUNC __llvm_profile_instrument_memop
+#define INSTR_PROF_VALUE_PROF_MEMOP_FUNC_STR                                   \
+  INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_MEMOP_FUNC)
 
 /* InstrProfile per-function control data alignment.  */
 #define INSTR_PROF_DATA_ALIGNMENT 8
@@ -783,3 +790,121 @@
 #endif
 
 #undef COVMAP_V2_OR_V3
+
+#ifdef INSTR_PROF_VALUE_PROF_MEMOP_API
+
+#ifdef __cplusplus
+#define INSTR_PROF_INLINE inline
+#else
+#define INSTR_PROF_INLINE
+#endif
+
+/* The value range buckets (22 buckets) for the memop size value profiling look
+ * like:
+ *
+ *   [0, 0]
+ *   [1, 1]
+ *   [2, 2]
+ *   [3, 3]
+ *   [4, 4]
+ *   [5, 5]
+ *   [6, 6]
+ *   [7, 7]
+ *   [8, 8]
+ *   [9, 15]
+ *   [16, 16]
+ *   [17, 31]
+ *   [32, 32]
+ *   [33, 63]
+ *   [64, 64]
+ *   [65, 127]
+ *   [128, 128]
+ *   [129, 255]
+ *   [256, 256]
+ *   [257, 511]
+ *   [512, 512]
+ *   [513, UINT64_MAX]
+ *
+ * Each range has a 'representative value', which is the lower end value of
+ * the range and is what gets stored in the runtime profile data records and
+ * the VP metadata. For example, it's 2 for [2, 2] and 65 for [65, 127].
+ */
+
+/*
+ * Clz and Popcount. This code was copied from
+ * compiler-rt/lib/fuzzer/{FuzzerBuiltins.h,FuzzerBuiltinsMsvc.h} and
+ * llvm/include/llvm/Support/MathExtras.h.
+ */
+#if defined(_MSC_VER) && !defined(__clang__)
+
+#include <intrin.h>
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+int InstProfClzll(unsigned long long X) {
+  unsigned long LeadZeroIdx = 0;
+#if !defined(_M_ARM64) && !defined(_M_X64)
+  // Scan the high 32 bits.
+  if (_BitScanReverse(&LeadZeroIdx, (unsigned long)(X >> 32)))
+    return (int)(63 - (LeadZeroIdx + 32)); // Create a bit offset
+                                           // from the MSB.
+  // Scan the low 32 bits.
+  if (_BitScanReverse(&LeadZeroIdx, (unsigned long)(X)))
+    return (int)(63 - LeadZeroIdx);
+#else
+  if (_BitScanReverse64(&LeadZeroIdx, X)) return 63 - LeadZeroIdx;
+#endif
+  return 64;
+}
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+int InstProfPopcountll(unsigned long long X) {
+  // This code originates from https://reviews.llvm.org/rG30626254510f.
+  unsigned long long v = X;
+  v = v - ((v >> 1) & 0x5555555555555555ULL);
+  v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
+  v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
+  return (int)((unsigned long long)(v * 0x0101010101010101ULL) >> 56);
+}
+
+#else
+
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+int InstProfClzll(unsigned long long X) { return __builtin_clzll(X); }
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+int InstProfPopcountll(unsigned long long X) { return __builtin_popcountll(X); }
+
+#endif  /* defined(_MSC_VER) && !defined(__clang__) */
+
+/* Map an (observed) memop size value to the representative value of its range.
+ * For example, 5 -> 5, 22 -> 17, 99 -> 65, 256 -> 256, 1001 -> 513. */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE uint64_t
+InstrProfGetRangeRepValue(uint64_t Value) {
+  if (Value <= 8)
+    // The first ranges are individually tracked. Use the value as is.
+    return Value;
+  else if (Value >= 513)
+    // The last range is mapped to its lowest value.
+    return 513;
+  else if (InstProfPopcountll(Value) == 1)
+    // If it's a power of two, use it as is.
+    return Value;
+  else
+    // Otherwise, round down to the previous power of two and add 1.
+    return (1 << (64 - InstProfClzll(Value) - 1)) + 1;
+}
+
+/* Return true if the range that an (observed) memop size value belongs to has
+ * only a single value in the range.  For example, 0 -> true, 8 -> true, 10 ->
+ * false, 64 -> true, 100 -> false, 513 -> false. */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE unsigned
+InstrProfIsSingleValRange(uint64_t Value) {
+  if (Value <= 8)
+    // The first ranges are individually tracked.
+    return 1;
+  else if (InstProfPopcountll(Value) == 1)
+    // If it's a power of two, there's only one value.
+    return 1;
+  else
+    // Otherwise, there's more than one value in the range.
+    return 0;
+}
+
+#endif /* INSTR_PROF_VALUE_PROF_MEMOP_API */
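
A small hypothetical harness checking the bucket mapping documented above. It
assumes the .inc file is reachable on the include path, and defines
INSTR_PROF_VISIBILITY empty the way a standalone consumer would; the expected
values mirror the documented examples 5 -> 5, 22 -> 17, 99 -> 65, 1001 -> 513.

    #include <cassert>
    #include <cstdint>

    #define INSTR_PROF_VISIBILITY /* empty outside compiler-rt */
    #define INSTR_PROF_VALUE_PROF_MEMOP_API
    #include "profile/InstrProfData.inc" // assumed include path

    int main() {
      assert(InstrProfGetRangeRepValue(5) == 5);      // tracked individually
      assert(InstrProfGetRangeRepValue(22) == 17);    // bucket [17, 31]
      assert(InstrProfGetRangeRepValue(99) == 65);    // bucket [65, 127]
      assert(InstrProfGetRangeRepValue(256) == 256);  // power of two
      assert(InstrProfGetRangeRepValue(1001) == 513); // bucket [513, UINT64_MAX]
      assert(InstrProfIsSingleValRange(64) && !InstrProfIsSingleValRange(100));
      return 0;
    }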
diff --git a/darwin-x86/lib64/clang/11.0.5/include/ptwriteintrin.h b/darwin-x86/lib64/clang/12.0.1/include/ptwriteintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/ptwriteintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/ptwriteintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/rdseedintrin.h b/darwin-x86/lib64/clang/12.0.1/include/rdseedintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/rdseedintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/rdseedintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/rtmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/rtmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/rtmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/rtmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/s390intrin.h b/darwin-x86/lib64/clang/12.0.1/include/s390intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/s390intrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/s390intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/allocator_interface.h b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/allocator_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/sanitizer/allocator_interface.h
rename to darwin-x86/lib64/clang/12.0.1/include/sanitizer/allocator_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/asan_interface.h b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/asan_interface.h
similarity index 96%
rename from darwin-x86/lib64/clang/11.0.5/include/sanitizer/asan_interface.h
rename to darwin-x86/lib64/clang/12.0.1/include/sanitizer/asan_interface.h
index 6af93aa..792ef9c 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/asan_interface.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/asan_interface.h
@@ -188,8 +188,8 @@
 /// \param addr Address to locate.
 /// \param name Buffer to store the variable's name.
 /// \param name_size Size in bytes of the variable's name buffer.
-/// \param region_address [out] Address of the region.
-/// \param region_size [out] Size of the region in bytes.
+/// \param[out] region_address Address of the region.
+/// \param[out] region_size Size of the region in bytes.
 ///
 /// \returns Returns the category of the given pointer as a constant string.
 const char *__asan_locate_address(void *addr, char *name, size_t name_size,
@@ -204,7 +204,7 @@
 /// \param addr A heap address.
 /// \param trace A buffer to store the stack trace.
 /// \param size Size in bytes of the trace buffer.
-/// \param thread_id [out] The thread ID of the address.
+/// \param[out] thread_id The thread ID of the address.
 ///
 /// \returns Returns the number of stored frames or 0 on error.
 size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
@@ -219,7 +219,7 @@
 /// \param addr A heap address.
 /// \param trace A buffer to store the stack trace.
 /// \param size Size in bytes of the trace buffer.
-/// \param thread_id [out] The thread ID of the address.
+/// \param[out] thread_id The thread ID of the address.
 ///
 /// \returns Returns the number of stored frames or 0 on error.
 size_t __asan_get_free_stack(void *addr, void **trace, size_t size,
@@ -228,8 +228,8 @@
 /// Gets the current shadow memory mapping (useful for calling from the
 /// debugger).
 ///
-/// \param shadow_scale [out] Shadow scale value.
-/// \param shadow_offset [out] Offset value.
+/// \param[out] shadow_scale Shadow scale value.
+/// \param[out] shadow_offset Offset value.
 void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset);
 
 /// This is an internal function that is called to report an error. However,
@@ -302,8 +302,8 @@
 ///
 /// \param fake_stack An opaque handler to a fake stack.
 /// \param addr Address to test.
-/// \param beg [out] Beginning of fake frame.
-/// \param end [out] End of fake frame.
+/// \param[out] beg Beginning of fake frame.
+/// \param[out] end End of fake frame.
 /// \returns Stack address or NULL.
 void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
                                    void **end);
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/common_interface_defs.h b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/common_interface_defs.h
similarity index 98%
rename from darwin-x86/lib64/clang/11.0.5/include/sanitizer/common_interface_defs.h
rename to darwin-x86/lib64/clang/12.0.1/include/sanitizer/common_interface_defs.h
index f979c6a..b4f977b 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/common_interface_defs.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/common_interface_defs.h
@@ -320,7 +320,7 @@
 /// signal callback runs during the switch, it will not benefit from stack
 /// use-after-return detection.
 ///
-/// \param fake_stack_save [out] Fake stack save location.
+/// \param[out] fake_stack_save Fake stack save location.
 /// \param bottom Bottom address of stack.
 /// \param size Size of stack in bytes.
 void __sanitizer_start_switch_fiber(void **fake_stack_save,
@@ -335,8 +335,8 @@
 /// <c>__sanitizer_start_switch_fiber()</c>.
 ///
 /// \param fake_stack_save Fake stack save location.
-/// \param bottom_old [out] Bottom address of old stack.
-/// \param size_old [out] Size of old stack in bytes.
+/// \param[out] bottom_old Bottom address of old stack.
+/// \param[out] size_old Size of old stack in bytes.
 void __sanitizer_finish_switch_fiber(void *fake_stack_save,
                                      const void **bottom_old,
                                      size_t *size_old);
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/coverage_interface.h b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/coverage_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/sanitizer/coverage_interface.h
rename to darwin-x86/lib64/clang/12.0.1/include/sanitizer/coverage_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/dfsan_interface.h b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/dfsan_interface.h
similarity index 94%
rename from darwin-x86/lib64/clang/11.0.5/include/sanitizer/dfsan_interface.h
rename to darwin-x86/lib64/clang/12.0.1/include/sanitizer/dfsan_interface.h
index 81546e5..18b2c81 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/dfsan_interface.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/dfsan_interface.h
@@ -80,9 +80,11 @@
 size_t dfsan_get_label_count(void);
 
 /// Flushes the DFSan shadow, i.e. forgets about all labels currently associated
-/// with the application memory. Will work only if there are no other
-/// threads executing DFSan-instrumented code concurrently.
-/// Use this call to start over the taint tracking within the same procces.
+/// with the application memory.  Use this call to start over the taint tracking
+/// within the same process.
+///
+/// Note: If another thread is working with tainted data during the flush, that
+/// taint could still be written to shadow after the flush.
 void dfsan_flush(void);
 
 /// Sets a callback to be invoked on calls to write().  The callback is invoked
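
As a usage note for the flush semantics above, a minimal single-threaded sketch
(built with -fsanitize=dataflow; the label description string is illustrative):

    #include <sanitizer/dfsan_interface.h>
    #include <cassert>

    int main() {
      int x = 42;
      dfsan_label lbl = dfsan_create_label("x", nullptr);
      dfsan_set_label(lbl, &x, sizeof(x));
      assert(dfsan_get_label(x) != 0); // x is tainted

      // Start taint tracking over; per the note above, only safe while no
      // other thread is working with tainted data.
      dfsan_flush();
      assert(dfsan_get_label(x) == 0); // all labels forgotten
      return 0;
    }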
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/hwasan_interface.h b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/hwasan_interface.h
similarity index 96%
rename from darwin-x86/lib64/clang/11.0.5/include/sanitizer/hwasan_interface.h
rename to darwin-x86/lib64/clang/12.0.1/include/sanitizer/hwasan_interface.h
index 4c9ad13..14035c0 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/hwasan_interface.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/hwasan_interface.h
@@ -73,6 +73,9 @@
    * accessed through the pointer in x, or -1 if the whole range is good. */
   intptr_t __hwasan_test_shadow(const volatile void *x, size_t size);
 
+  /* Sets the callback function to be called during HWASan error reporting. */
+  void __hwasan_set_error_report_callback(void (*callback)(const char *));
+
   int __sanitizer_posix_memalign(void **memptr, size_t alignment, size_t size);
   void * __sanitizer_memalign(size_t alignment, size_t size);
   void * __sanitizer_aligned_alloc(size_t alignment, size_t size);
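
A hedged sketch of the new report callback (the handler name is invented; the
program must be built with -fsanitize=hwaddress):

    #include <sanitizer/hwasan_interface.h>
    #include <cstdio>

    // Called with the textual report while HWASan reports an error.
    static void log_report(const char *report) {
      std::fprintf(stderr, "%s", report);
    }

    int main() {
      __hwasan_set_error_report_callback(log_report);
      // ... run the instrumented program as usual ...
      return 0;
    }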
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/linux_syscall_hooks.h b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/linux_syscall_hooks.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/sanitizer/linux_syscall_hooks.h
rename to darwin-x86/lib64/clang/12.0.1/include/sanitizer/linux_syscall_hooks.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/lsan_interface.h b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/lsan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/sanitizer/lsan_interface.h
rename to darwin-x86/lib64/clang/12.0.1/include/sanitizer/lsan_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/msan_interface.h b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/msan_interface.h
similarity index 96%
rename from darwin-x86/lib64/clang/11.0.5/include/sanitizer/msan_interface.h
rename to darwin-x86/lib64/clang/12.0.1/include/sanitizer/msan_interface.h
index d40c556..eeb39fb 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/msan_interface.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/msan_interface.h
@@ -114,6 +114,9 @@
      call to __msan_scoped_disable_interceptor_checks. */
   void __msan_scoped_enable_interceptor_checks(void);
 
+  void __msan_start_switch_fiber(const void *bottom, size_t size);
+  void __msan_finish_switch_fiber(const void **bottom_old, size_t *size_old);
+
 #ifdef __cplusplus
 }  // extern "C"
 #endif
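
The new hooks mirror the __sanitizer_start_switch_fiber /
__sanitizer_finish_switch_fiber pattern from common_interface_defs.h earlier in
this change; a hedged sketch, with the actual context transfer elided:

    #include <sanitizer/msan_interface.h>
    #include <cstddef>

    // Announce a manual stack switch so MSan tracks shadow for the fiber
    // stack; the names and surrounding fiber machinery are illustrative.
    void run_on_fiber(const void *fiber_stack_bottom, size_t fiber_stack_size) {
      __msan_start_switch_fiber(fiber_stack_bottom, fiber_stack_size);
      // ... swapcontext()-style transfer onto the fiber stack happens here ...
      const void *old_bottom = nullptr;
      size_t old_size = 0;
      __msan_finish_switch_fiber(&old_bottom, &old_size);
    }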
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/netbsd_syscall_hooks.h b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/netbsd_syscall_hooks.h
similarity index 95%
rename from darwin-x86/lib64/clang/11.0.5/include/sanitizer/netbsd_syscall_hooks.h
rename to darwin-x86/lib64/clang/12.0.1/include/sanitizer/netbsd_syscall_hooks.h
index 370da0e..f661152 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/netbsd_syscall_hooks.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/netbsd_syscall_hooks.h
@@ -20,8 +20,8 @@
 // DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
 //
 // Generated with: generate_netbsd_syscalls.awk
-// Generated date: 2019-12-24
-// Generated from: syscalls.master,v 1.296 2019/09/22 22:59:39 christos Exp
+// Generated date: 2020-09-10
+// Generated from: syscalls.master,v 1.306 2020/08/14 00:53:16 riastradh Exp
 //
 //===----------------------------------------------------------------------===//
 #ifndef SANITIZER_NETBSD_SYSCALL_HOOKS_H
@@ -474,7 +474,12 @@
   __sanitizer_syscall_pre_impl_dup2((long long)(from), (long long)(to))
 #define __sanitizer_syscall_post_dup2(res, from, to)                           \
   __sanitizer_syscall_post_impl_dup2(res, (long long)(from), (long long)(to))
-/* syscall 91 has been skipped */
+#define __sanitizer_syscall_pre_getrandom(buf, buflen, flags)                  \
+  __sanitizer_syscall_pre_impl_getrandom(                                      \
+      (long long)(buf), (long long)(buflen), (long long)(flags))
+#define __sanitizer_syscall_post_getrandom(res, buf, buflen, flags)            \
+  __sanitizer_syscall_post_impl_getrandom(                                     \
+      res, (long long)(buf), (long long)(buflen), (long long)(flags))
 #define __sanitizer_syscall_pre_fcntl(fd, cmd, arg)                            \
   __sanitizer_syscall_pre_impl_fcntl((long long)(fd), (long long)(cmd),        \
                                      (long long)(arg))
@@ -849,9 +854,31 @@
 #define __sanitizer_syscall_post_sysarch(res, op, parms)                       \
   __sanitizer_syscall_post_impl_sysarch(res, (long long)(op),                  \
                                         (long long)(parms))
-/* syscall 166 has been skipped */
-/* syscall 167 has been skipped */
-/* syscall 168 has been skipped */
+#define __sanitizer_syscall_pre___futex(uaddr, op, val, timeout, uaddr2, val2, \
+                                        val3)                                  \
+  __sanitizer_syscall_pre_impl___futex((long long)(uaddr), (long long)(op),    \
+                                       (long long)(val), (long long)(timeout), \
+                                       (long long)(uaddr2), (long long)(val2), \
+                                       (long long)(val3))
+#define __sanitizer_syscall_post___futex(res, uaddr, op, val, timeout, uaddr2, \
+                                         val2, val3)                           \
+  __sanitizer_syscall_post_impl___futex(                                       \
+      res, (long long)(uaddr), (long long)(op), (long long)(val),              \
+      (long long)(timeout), (long long)(uaddr2), (long long)(val2),            \
+      (long long)(val3))
+#define __sanitizer_syscall_pre___futex_set_robust_list(head, len)             \
+  __sanitizer_syscall_pre_impl___futex_set_robust_list((long long)(head),      \
+                                                       (long long)(len))
+#define __sanitizer_syscall_post___futex_set_robust_list(res, head, len)       \
+  __sanitizer_syscall_post_impl___futex_set_robust_list(                       \
+      res, (long long)(head), (long long)(len))
+#define __sanitizer_syscall_pre___futex_get_robust_list(lwpid, headp, lenp)    \
+  __sanitizer_syscall_pre_impl___futex_get_robust_list(                        \
+      (long long)(lwpid), (long long)(headp), (long long)(lenp))
+#define __sanitizer_syscall_post___futex_get_robust_list(res, lwpid, headp,    \
+                                                         lenp)                 \
+  __sanitizer_syscall_post_impl___futex_get_robust_list(                       \
+      res, (long long)(lwpid), (long long)(headp), (long long)(lenp))
 #if !defined(_LP64)
 #define __sanitizer_syscall_pre_compat_10_osemsys(which, a2, a3, a4, a5)       \
   __sanitizer_syscall_pre_impl_compat_10_osemsys(                              \
@@ -2731,6 +2758,83 @@
   __sanitizer_syscall_post_impl___fhstatvfs190(                                \
       res, (long long)(fhp), (long long)(fh_size), (long long)(buf),           \
       (long long)(flags))
+#define __sanitizer_syscall_pre___acl_get_link(path, type, aclp)               \
+  __sanitizer_syscall_pre_impl___acl_get_link(                                 \
+      (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_get_link(res, path, type, aclp)         \
+  __sanitizer_syscall_post_impl___acl_get_link(                                \
+      res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_set_link(path, type, aclp)               \
+  __sanitizer_syscall_pre_impl___acl_set_link(                                 \
+      (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_set_link(res, path, type, aclp)         \
+  __sanitizer_syscall_post_impl___acl_set_link(                                \
+      res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_delete_link(path, type)                  \
+  __sanitizer_syscall_pre_impl___acl_delete_link((long long)(path),            \
+                                                 (long long)(type))
+#define __sanitizer_syscall_post___acl_delete_link(res, path, type)            \
+  __sanitizer_syscall_post_impl___acl_delete_link(res, (long long)(path),      \
+                                                  (long long)(type))
+#define __sanitizer_syscall_pre___acl_aclcheck_link(path, type, aclp)          \
+  __sanitizer_syscall_pre_impl___acl_aclcheck_link(                            \
+      (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_aclcheck_link(res, path, type, aclp)    \
+  __sanitizer_syscall_post_impl___acl_aclcheck_link(                           \
+      res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_get_file(path, type, aclp)               \
+  __sanitizer_syscall_pre_impl___acl_get_file(                                 \
+      (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_get_file(res, path, type, aclp)         \
+  __sanitizer_syscall_post_impl___acl_get_file(                                \
+      res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_set_file(path, type, aclp)               \
+  __sanitizer_syscall_pre_impl___acl_set_file(                                 \
+      (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_set_file(res, path, type, aclp)         \
+  __sanitizer_syscall_post_impl___acl_set_file(                                \
+      res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_get_fd(filedes, type, aclp)              \
+  __sanitizer_syscall_pre_impl___acl_get_fd(                                   \
+      (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_get_fd(res, filedes, type, aclp)        \
+  __sanitizer_syscall_post_impl___acl_get_fd(                                  \
+      res, (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_set_fd(filedes, type, aclp)              \
+  __sanitizer_syscall_pre_impl___acl_set_fd(                                   \
+      (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_set_fd(res, filedes, type, aclp)        \
+  __sanitizer_syscall_post_impl___acl_set_fd(                                  \
+      res, (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_delete_file(path, type)                  \
+  __sanitizer_syscall_pre_impl___acl_delete_file((long long)(path),            \
+                                                 (long long)(type))
+#define __sanitizer_syscall_post___acl_delete_file(res, path, type)            \
+  __sanitizer_syscall_post_impl___acl_delete_file(res, (long long)(path),      \
+                                                  (long long)(type))
+#define __sanitizer_syscall_pre___acl_delete_fd(filedes, type)                 \
+  __sanitizer_syscall_pre_impl___acl_delete_fd((long long)(filedes),           \
+                                               (long long)(type))
+#define __sanitizer_syscall_post___acl_delete_fd(res, filedes, type)           \
+  __sanitizer_syscall_post_impl___acl_delete_fd(res, (long long)(filedes),     \
+                                                (long long)(type))
+#define __sanitizer_syscall_pre___acl_aclcheck_file(path, type, aclp)          \
+  __sanitizer_syscall_pre_impl___acl_aclcheck_file(                            \
+      (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_aclcheck_file(res, path, type, aclp)    \
+  __sanitizer_syscall_post_impl___acl_aclcheck_file(                           \
+      res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_aclcheck_fd(filedes, type, aclp)         \
+  __sanitizer_syscall_pre_impl___acl_aclcheck_fd(                              \
+      (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_aclcheck_fd(res, filedes, type, aclp)   \
+  __sanitizer_syscall_post_impl___acl_aclcheck_fd(                             \
+      res, (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre_lpathconf(path, name)                          \
+  __sanitizer_syscall_pre_impl_lpathconf((long long)(path), (long long)(name))
+#define __sanitizer_syscall_post_lpathconf(res, path, name)                    \
+  __sanitizer_syscall_post_impl_lpathconf(res, (long long)(path),              \
+                                          (long long)(name))
 
 /* Compat with older releases */
 #define __sanitizer_syscall_pre_getvfsstat                                     \
@@ -3088,7 +3192,10 @@
 void __sanitizer_syscall_pre_impl_dup2(long long from, long long to);
 void __sanitizer_syscall_post_impl_dup2(long long res, long long from,
                                         long long to);
-/* syscall 91 has been skipped */
+void __sanitizer_syscall_pre_impl_getrandom(long long buf, long long buflen,
+                                            long long flags);
+void __sanitizer_syscall_post_impl_getrandom(long long res, long long buf,
+                                             long long buflen, long long flags);
 void __sanitizer_syscall_pre_impl_fcntl(long long fd, long long cmd,
                                         long long arg);
 void __sanitizer_syscall_post_impl_fcntl(long long res, long long fd,
@@ -3380,9 +3487,26 @@
 void __sanitizer_syscall_pre_impl_sysarch(long long op, long long parms);
 void __sanitizer_syscall_post_impl_sysarch(long long res, long long op,
                                            long long parms);
-/* syscall 166 has been skipped */
-/* syscall 167 has been skipped */
-/* syscall 168 has been skipped */
+void __sanitizer_syscall_pre_impl___futex(long long uaddr, long long op,
+                                          long long val, long long timeout,
+                                          long long uaddr2, long long val2,
+                                          long long val3);
+void __sanitizer_syscall_post_impl___futex(long long res, long long uaddr,
+                                           long long op, long long val,
+                                           long long timeout, long long uaddr2,
+                                           long long val2, long long val3);
+void __sanitizer_syscall_pre_impl___futex_set_robust_list(long long head,
+                                                          long long len);
+void __sanitizer_syscall_post_impl___futex_set_robust_list(long long res,
+                                                           long long head,
+                                                           long long len);
+void __sanitizer_syscall_pre_impl___futex_get_robust_list(long long lwpid,
+                                                          long long headp,
+                                                          long long lenp);
+void __sanitizer_syscall_post_impl___futex_get_robust_list(long long res,
+                                                           long long lwpid,
+                                                           long long headp,
+                                                           long long lenp);
 #if !defined(_LP64)
 void __sanitizer_syscall_pre_impl_compat_10_osemsys(long long which,
                                                     long long a2, long long a3,
@@ -4802,6 +4926,75 @@
                                                   long long fh_size,
                                                   long long buf,
                                                   long long flags);
+void __sanitizer_syscall_pre_impl___acl_get_link(long long path, long long type,
+                                                 long long aclp);
+void __sanitizer_syscall_post_impl___acl_get_link(long long res, long long path,
+                                                  long long type,
+                                                  long long aclp);
+void __sanitizer_syscall_pre_impl___acl_set_link(long long path, long long type,
+                                                 long long aclp);
+void __sanitizer_syscall_post_impl___acl_set_link(long long res, long long path,
+                                                  long long type,
+                                                  long long aclp);
+void __sanitizer_syscall_pre_impl___acl_delete_link(long long path,
+                                                    long long type);
+void __sanitizer_syscall_post_impl___acl_delete_link(long long res,
+                                                     long long path,
+                                                     long long type);
+void __sanitizer_syscall_pre_impl___acl_aclcheck_link(long long path,
+                                                      long long type,
+                                                      long long aclp);
+void __sanitizer_syscall_post_impl___acl_aclcheck_link(long long res,
+                                                       long long path,
+                                                       long long type,
+                                                       long long aclp);
+void __sanitizer_syscall_pre_impl___acl_get_file(long long path, long long type,
+                                                 long long aclp);
+void __sanitizer_syscall_post_impl___acl_get_file(long long res, long long path,
+                                                  long long type,
+                                                  long long aclp);
+void __sanitizer_syscall_pre_impl___acl_set_file(long long path, long long type,
+                                                 long long aclp);
+void __sanitizer_syscall_post_impl___acl_set_file(long long res, long long path,
+                                                  long long type,
+                                                  long long aclp);
+void __sanitizer_syscall_pre_impl___acl_get_fd(long long filedes,
+                                               long long type, long long aclp);
+void __sanitizer_syscall_post_impl___acl_get_fd(long long res,
+                                                long long filedes,
+                                                long long type, long long aclp);
+void __sanitizer_syscall_pre_impl___acl_set_fd(long long filedes,
+                                               long long type, long long aclp);
+void __sanitizer_syscall_post_impl___acl_set_fd(long long res,
+                                                long long filedes,
+                                                long long type, long long aclp);
+void __sanitizer_syscall_pre_impl___acl_delete_file(long long path,
+                                                    long long type);
+void __sanitizer_syscall_post_impl___acl_delete_file(long long res,
+                                                     long long path,
+                                                     long long type);
+void __sanitizer_syscall_pre_impl___acl_delete_fd(long long filedes,
+                                                  long long type);
+void __sanitizer_syscall_post_impl___acl_delete_fd(long long res,
+                                                   long long filedes,
+                                                   long long type);
+void __sanitizer_syscall_pre_impl___acl_aclcheck_file(long long path,
+                                                      long long type,
+                                                      long long aclp);
+void __sanitizer_syscall_post_impl___acl_aclcheck_file(long long res,
+                                                       long long path,
+                                                       long long type,
+                                                       long long aclp);
+void __sanitizer_syscall_pre_impl___acl_aclcheck_fd(long long filedes,
+                                                    long long type,
+                                                    long long aclp);
+void __sanitizer_syscall_post_impl___acl_aclcheck_fd(long long res,
+                                                     long long filedes,
+                                                     long long type,
+                                                     long long aclp);
+void __sanitizer_syscall_pre_impl_lpathconf(long long path, long long name);
+void __sanitizer_syscall_post_impl_lpathconf(long long res, long long path,
+                                             long long name);
 
 #ifdef __cplusplus
 } // extern "C"
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/scudo_interface.h b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/scudo_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/sanitizer/scudo_interface.h
rename to darwin-x86/lib64/clang/12.0.1/include/sanitizer/scudo_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface.h b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/tsan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface.h
rename to darwin-x86/lib64/clang/12.0.1/include/sanitizer/tsan_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface_atomic.h b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/tsan_interface_atomic.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface_atomic.h
rename to darwin-x86/lib64/clang/12.0.1/include/sanitizer/tsan_interface_atomic.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/ubsan_interface.h b/darwin-x86/lib64/clang/12.0.1/include/sanitizer/ubsan_interface.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/sanitizer/ubsan_interface.h
rename to darwin-x86/lib64/clang/12.0.1/include/sanitizer/ubsan_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/serializeintrin.h b/darwin-x86/lib64/clang/12.0.1/include/serializeintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/serializeintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/serializeintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sgxintrin.h b/darwin-x86/lib64/clang/12.0.1/include/sgxintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/sgxintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/sgxintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/shaintrin.h b/darwin-x86/lib64/clang/12.0.1/include/shaintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/shaintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/shaintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/smmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/smmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/smmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/smmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/stdalign.h b/darwin-x86/lib64/clang/12.0.1/include/stdalign.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/stdalign.h
rename to darwin-x86/lib64/clang/12.0.1/include/stdalign.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/stdarg.h b/darwin-x86/lib64/clang/12.0.1/include/stdarg.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/stdarg.h
rename to darwin-x86/lib64/clang/12.0.1/include/stdarg.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/stdatomic.h b/darwin-x86/lib64/clang/12.0.1/include/stdatomic.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/stdatomic.h
rename to darwin-x86/lib64/clang/12.0.1/include/stdatomic.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/stdbool.h b/darwin-x86/lib64/clang/12.0.1/include/stdbool.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/stdbool.h
rename to darwin-x86/lib64/clang/12.0.1/include/stdbool.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/stddef.h b/darwin-x86/lib64/clang/12.0.1/include/stddef.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/stddef.h
rename to darwin-x86/lib64/clang/12.0.1/include/stddef.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/stdint.h b/darwin-x86/lib64/clang/12.0.1/include/stdint.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/stdint.h
rename to darwin-x86/lib64/clang/12.0.1/include/stdint.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/stdnoreturn.h b/darwin-x86/lib64/clang/12.0.1/include/stdnoreturn.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/stdnoreturn.h
rename to darwin-x86/lib64/clang/12.0.1/include/stdnoreturn.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/tbmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/tbmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/tbmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/tbmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/tgmath.h b/darwin-x86/lib64/clang/12.0.1/include/tgmath.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/tgmath.h
rename to darwin-x86/lib64/clang/12.0.1/include/tgmath.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/tmmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/tmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/tmmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/tmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/tsxldtrkintrin.h b/darwin-x86/lib64/clang/12.0.1/include/tsxldtrkintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/tsxldtrkintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/tsxldtrkintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/unwind.h b/darwin-x86/lib64/clang/12.0.1/include/unwind.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/unwind.h
rename to darwin-x86/lib64/clang/12.0.1/include/unwind.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/vadefs.h b/darwin-x86/lib64/clang/12.0.1/include/vadefs.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/vadefs.h
rename to darwin-x86/lib64/clang/12.0.1/include/vadefs.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/vaesintrin.h b/darwin-x86/lib64/clang/12.0.1/include/vaesintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/vaesintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/vaesintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/varargs.h b/darwin-x86/lib64/clang/12.0.1/include/varargs.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/varargs.h
rename to darwin-x86/lib64/clang/12.0.1/include/varargs.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/vecintrin.h b/darwin-x86/lib64/clang/12.0.1/include/vecintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/vecintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/vecintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/vpclmulqdqintrin.h b/darwin-x86/lib64/clang/12.0.1/include/vpclmulqdqintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/vpclmulqdqintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/vpclmulqdqintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/waitpkgintrin.h b/darwin-x86/lib64/clang/12.0.1/include/waitpkgintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/waitpkgintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/waitpkgintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/wasm_simd128.h b/darwin-x86/lib64/clang/12.0.1/include/wasm_simd128.h
similarity index 91%
rename from darwin-x86/lib64/clang/11.0.5/include/wasm_simd128.h
rename to darwin-x86/lib64/clang/12.0.1/include/wasm_simd128.h
index b781238..ac88516 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/wasm_simd128.h
+++ b/darwin-x86/lib64/clang/12.0.1/include/wasm_simd128.h
@@ -18,8 +18,7 @@
 
 // Internal types determined by clang builtin definitions
 typedef int32_t __v128_u __attribute__((__vector_size__(16), __aligned__(1)));
-typedef char __i8x16 __attribute__((__vector_size__(16), __aligned__(16)));
-typedef signed char __s8x16
+typedef signed char __i8x16
     __attribute__((__vector_size__(16), __aligned__(16)));
 typedef unsigned char __u8x16
     __attribute__((__vector_size__(16), __aligned__(16)));
@@ -35,6 +34,13 @@
 typedef float __f32x4 __attribute__((__vector_size__(16), __aligned__(16)));
 typedef double __f64x2 __attribute__((__vector_size__(16), __aligned__(16)));
 
+typedef signed char __i8x8 __attribute__((__vector_size__(8), __aligned__(8)));
+typedef unsigned char __u8x8
+    __attribute__((__vector_size__(8), __aligned__(8)));
+typedef short __i16x4 __attribute__((__vector_size__(8), __aligned__(8)));
+typedef unsigned short __u16x4
+    __attribute__((__vector_size__(8), __aligned__(8)));
+
 #define __DEFAULT_FN_ATTRS                                                     \
   __attribute__((__always_inline__, __nodebug__, __target__("simd128"),        \
                  __min_vector_width__(128)))
@@ -273,7 +279,7 @@
   (__builtin_wasm_extract_lane_s_i8x16((__i8x16)(__a), __i))
 
 #define wasm_u8x16_extract_lane(__a, __i)                                      \
-  (__builtin_wasm_extract_lane_u_i8x16((__i8x16)(__a), __i))
+  (__builtin_wasm_extract_lane_u_i8x16((__u8x16)(__a), __i))
 
 #define wasm_i8x16_replace_lane(__a, __i, __b)                                 \
   ((v128_t)__builtin_wasm_replace_lane_i8x16((__i8x16)(__a), __i, __b))
@@ -286,7 +292,7 @@
   (__builtin_wasm_extract_lane_s_i16x8((__i16x8)(__a), __i))
 
 #define wasm_u16x8_extract_lane(__a, __i)                                      \
-  (__builtin_wasm_extract_lane_u_i16x8((__i16x8)(__a), __i))
+  (__builtin_wasm_extract_lane_u_i16x8((__u16x8)(__a), __i))
 
 #define wasm_i16x8_replace_lane(__a, __i, __b)                                 \
   ((v128_t)__builtin_wasm_replace_lane_i16x8((__i16x8)(__a), __i, __b))
@@ -333,17 +339,17 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_eq(v128_t __a,
                                                           v128_t __b) {
-  return (v128_t)((__s8x16)__a == (__s8x16)__b);
+  return (v128_t)((__i8x16)__a == (__i8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ne(v128_t __a,
                                                           v128_t __b) {
-  return (v128_t)((__s8x16)__a != (__s8x16)__b);
+  return (v128_t)((__i8x16)__a != (__i8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_lt(v128_t __a,
                                                           v128_t __b) {
-  return (v128_t)((__s8x16)__a < (__s8x16)__b);
+  return (v128_t)((__i8x16)__a < (__i8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_lt(v128_t __a,
@@ -353,7 +359,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_gt(v128_t __a,
                                                           v128_t __b) {
-  return (v128_t)((__s8x16)__a > (__s8x16)__b);
+  return (v128_t)((__i8x16)__a > (__i8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_gt(v128_t __a,
@@ -363,7 +369,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_le(v128_t __a,
                                                           v128_t __b) {
-  return (v128_t)((__s8x16)__a <= (__s8x16)__b);
+  return (v128_t)((__i8x16)__a <= (__i8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_le(v128_t __a,
@@ -373,7 +379,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ge(v128_t __a,
                                                           v128_t __b) {
-  return (v128_t)((__s8x16)__a >= (__s8x16)__b);
+  return (v128_t)((__i8x16)__a >= (__i8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_ge(v128_t __a,
@@ -595,7 +601,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a,
                                                            int32_t __b) {
-  return (v128_t)((__s8x16)__a >> __b);
+  return (v128_t)((__i8x16)__a >> __b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a,
@@ -616,8 +622,8 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_u8x16_add_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_add_saturate_u_i8x16((__i8x16)__a,
-                                                     (__i8x16)__b);
+  return (v128_t)__builtin_wasm_add_saturate_u_i8x16((__u8x16)__a,
+                                                     (__u8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a,
@@ -633,8 +639,8 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_sub_saturate_u_i8x16((__i8x16)__a,
-                                                     (__i8x16)__b);
+  return (v128_t)__builtin_wasm_sub_saturate_u_i8x16((__u8x16)__a,
+                                                     (__u8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_min(v128_t __a,
@@ -644,7 +650,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_min(v128_t __a,
                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_min_u_i8x16((__i8x16)__a, (__i8x16)__b);
+  return (v128_t)__builtin_wasm_min_u_i8x16((__u8x16)__a, (__u8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_max(v128_t __a,
@@ -654,12 +660,12 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_max(v128_t __a,
                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_max_u_i8x16((__i8x16)__a, (__i8x16)__b);
+  return (v128_t)__builtin_wasm_max_u_i8x16((__u8x16)__a, (__u8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_avgr(v128_t __a,
                                                             v128_t __b) {
-  return (v128_t)__builtin_wasm_avgr_u_i8x16((__i8x16)__a, (__i8x16)__b);
+  return (v128_t)__builtin_wasm_avgr_u_i8x16((__u8x16)__a, (__u8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_abs(v128_t __a) {
@@ -706,8 +712,8 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_u16x8_add_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_add_saturate_u_i16x8((__i16x8)__a,
-                                                     (__i16x8)__b);
+  return (v128_t)__builtin_wasm_add_saturate_u_i16x8((__u16x8)__a,
+                                                     (__u16x8)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a,
@@ -723,8 +729,8 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_sub_saturate_u_i16x8((__i16x8)__a,
-                                                     (__i16x8)__b);
+  return (v128_t)__builtin_wasm_sub_saturate_u_i16x8((__u16x8)__a,
+                                                     (__u16x8)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_mul(v128_t __a,
@@ -739,7 +745,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_min(v128_t __a,
                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_min_u_i16x8((__i16x8)__a, (__i16x8)__b);
+  return (v128_t)__builtin_wasm_min_u_i16x8((__u16x8)__a, (__u16x8)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_max(v128_t __a,
@@ -749,12 +755,12 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_max(v128_t __a,
                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_max_u_i16x8((__i16x8)__a, (__i16x8)__b);
+  return (v128_t)__builtin_wasm_max_u_i16x8((__u16x8)__a, (__u16x8)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_avgr(v128_t __a,
                                                             v128_t __b) {
-  return (v128_t)__builtin_wasm_avgr_u_i16x8((__i16x8)__a, (__i16x8)__b);
+  return (v128_t)__builtin_wasm_avgr_u_i16x8((__u16x8)__a, (__u16x8)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_abs(v128_t __a) {
@@ -810,7 +816,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_min(v128_t __a,
                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_min_u_i32x4((__i32x4)__a, (__i32x4)__b);
+  return (v128_t)__builtin_wasm_min_u_i32x4((__u32x4)__a, (__u32x4)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_max(v128_t __a,
@@ -820,7 +826,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_max(v128_t __a,
                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_max_u_i32x4((__i32x4)__a, (__i32x4)__b);
+  return (v128_t)__builtin_wasm_max_u_i32x4((__u32x4)__a, (__u32x4)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_neg(v128_t __a) {
@@ -1071,8 +1077,8 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_u8x16_narrow_i16x8(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__i16x8)__a,
-                                                     (__i16x8)__b);
+  return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__u16x8)__a,
+                                                     (__u16x8)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
@@ -1083,48 +1089,76 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_u16x8_narrow_i32x4(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__i32x4)__a,
-                                                     (__i32x4)__b);
+  return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__u32x4)__a,
+                                                     (__u32x4)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i16x8_widen_low_i8x16(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_low_s_i16x8_i8x16((__i8x16)__a);
+  return (v128_t) __builtin_convertvector(
+      (__i8x8){((__i8x16)__a)[0], ((__i8x16)__a)[1], ((__i8x16)__a)[2],
+               ((__i8x16)__a)[3], ((__i8x16)__a)[4], ((__i8x16)__a)[5],
+               ((__i8x16)__a)[6], ((__i8x16)__a)[7]},
+      __i16x8);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i16x8_widen_high_i8x16(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_high_s_i16x8_i8x16((__i8x16)__a);
+  return (v128_t) __builtin_convertvector(
+      (__i8x8){((__i8x16)__a)[8], ((__i8x16)__a)[9], ((__i8x16)__a)[10],
+               ((__i8x16)__a)[11], ((__i8x16)__a)[12], ((__i8x16)__a)[13],
+               ((__i8x16)__a)[14], ((__i8x16)__a)[15]},
+      __i16x8);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i16x8_widen_low_u8x16(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_low_u_i16x8_i8x16((__i8x16)__a);
+  return (v128_t) __builtin_convertvector(
+      (__u8x8){((__u8x16)__a)[0], ((__u8x16)__a)[1], ((__u8x16)__a)[2],
+               ((__u8x16)__a)[3], ((__u8x16)__a)[4], ((__u8x16)__a)[5],
+               ((__u8x16)__a)[6], ((__u8x16)__a)[7]},
+      __u16x8);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i16x8_widen_high_u8x16(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_high_u_i16x8_i8x16((__i8x16)__a);
+  return (v128_t) __builtin_convertvector(
+      (__u8x8){((__u8x16)__a)[8], ((__u8x16)__a)[9], ((__u8x16)__a)[10],
+               ((__u8x16)__a)[11], ((__u8x16)__a)[12], ((__u8x16)__a)[13],
+               ((__u8x16)__a)[14], ((__u8x16)__a)[15]},
+      __u16x8);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i32x4_widen_low_i16x8(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_low_s_i32x4_i16x8((__i16x8)__a);
+  return (v128_t) __builtin_convertvector(
+      (__i16x4){((__i16x8)__a)[0], ((__i16x8)__a)[1], ((__i16x8)__a)[2],
+                ((__i16x8)__a)[3]},
+      __i32x4);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i32x4_widen_high_i16x8(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_high_s_i32x4_i16x8((__i16x8)__a);
+  return (v128_t) __builtin_convertvector(
+      (__i16x4){((__i16x8)__a)[4], ((__i16x8)__a)[5], ((__i16x8)__a)[6],
+                ((__i16x8)__a)[7]},
+      __i32x4);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i32x4_widen_low_u16x8(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_low_u_i32x4_i16x8((__i16x8)__a);
+  return (v128_t) __builtin_convertvector(
+      (__u16x4){((__u16x8)__a)[0], ((__u16x8)__a)[1], ((__u16x8)__a)[2],
+                ((__u16x8)__a)[3]},
+      __u32x4);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i32x4_widen_high_u16x8(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_high_u_i32x4_i16x8((__i16x8)__a);
+  return (v128_t) __builtin_convertvector(
+      (__u16x4){((__u16x8)__a)[4], ((__u16x8)__a)[5], ((__u16x8)__a)[6],
+                ((__u16x8)__a)[7]},
+      __u32x4);
 }
 
 // Undefine helper macros
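
The wasm_simd128.h hunk above makes two coordinated changes: the plain-char __i8x16 / signed-char __s8x16 pair collapses into a single signed __i8x16 (with the unsigned builtin calls recast through __u8x16/__u16x8/__u32x4), and the widen operations drop the dedicated __builtin_wasm_widen_* builtins in favor of __builtin_convertvector over the new half-width vector types. The sketch below is ours, not part of the diff; it assumes a clang host compile, since vector_size and __builtin_convertvector are clang extensions, and shows the element-wise sign extension that the new widen_low lowering relies on.

// widen_sketch.cpp -- illustrative only; type names are ours.
#include <cstdio>

typedef signed char i8x8  __attribute__((__vector_size__(8)));   // 8 x i8
typedef short       i16x8 __attribute__((__vector_size__(16)));  // 8 x i16

int main() {
  i8x8 lo = {-1, 2, -3, 4, -5, 6, -7, 8};
  // __builtin_convertvector converts element-wise: each signed char is
  // sign-extended to short, matching i16x8.widen_low_i8x16 semantics.
  i16x8 wide = __builtin_convertvector(lo, i16x8);
  for (int i = 0; i < 8; ++i)
    printf("%d ", (int)wide[i]);
  printf("\n"); // prints: -1 2 -3 4 -5 6 -7 8
  return 0;
}
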
diff --git a/darwin-x86/lib64/clang/11.0.5/include/wbnoinvdintrin.h b/darwin-x86/lib64/clang/12.0.1/include/wbnoinvdintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/wbnoinvdintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/wbnoinvdintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/wmmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/wmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/wmmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/wmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/x86intrin.h b/darwin-x86/lib64/clang/12.0.1/include/x86intrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/x86intrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/x86intrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/xmmintrin.h b/darwin-x86/lib64/clang/12.0.1/include/xmmintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/xmmintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/xmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/xopintrin.h b/darwin-x86/lib64/clang/12.0.1/include/xopintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/xopintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/xopintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/xsavecintrin.h b/darwin-x86/lib64/clang/12.0.1/include/xsavecintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/xsavecintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/xsavecintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/xsaveintrin.h b/darwin-x86/lib64/clang/12.0.1/include/xsaveintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/xsaveintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/xsaveintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/xsaveoptintrin.h b/darwin-x86/lib64/clang/12.0.1/include/xsaveoptintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/xsaveoptintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/xsaveoptintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/xsavesintrin.h b/darwin-x86/lib64/clang/12.0.1/include/xsavesintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/xsavesintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/xsavesintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/xtestintrin.h b/darwin-x86/lib64/clang/12.0.1/include/xtestintrin.h
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/include/xtestintrin.h
rename to darwin-x86/lib64/clang/12.0.1/include/xtestintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/share/asan_blacklist.txt b/darwin-x86/lib64/clang/12.0.1/share/asan_blacklist.txt
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/share/asan_blacklist.txt
rename to darwin-x86/lib64/clang/12.0.1/share/asan_blacklist.txt
diff --git a/darwin-x86/lib64/clang/11.0.5/share/cfi_blacklist.txt b/darwin-x86/lib64/clang/12.0.1/share/cfi_blacklist.txt
similarity index 100%
rename from darwin-x86/lib64/clang/11.0.5/share/cfi_blacklist.txt
rename to darwin-x86/lib64/clang/12.0.1/share/cfi_blacklist.txt
diff --git a/darwin-x86/lib64/libLLVM.dylib b/darwin-x86/lib64/libLLVM.dylib
index bdf2fcc..3f85629 100755
--- a/darwin-x86/lib64/libLLVM.dylib
+++ b/darwin-x86/lib64/libLLVM.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libbase.dylib b/darwin-x86/lib64/libbase.dylib
index e09786c..cf5e10e 100755
--- a/darwin-x86/lib64/libbase.dylib
+++ b/darwin-x86/lib64/libbase.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libc++.1.dylib b/darwin-x86/lib64/libc++.1.dylib
index ee89286..f6f92fa 100755
--- a/darwin-x86/lib64/libc++.1.dylib
+++ b/darwin-x86/lib64/libc++.1.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libc++.dylib b/darwin-x86/lib64/libc++.dylib
index 7235172..050f6d0 100755
--- a/darwin-x86/lib64/libc++.dylib
+++ b/darwin-x86/lib64/libc++.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libc++abi.1.dylib b/darwin-x86/lib64/libc++abi.1.dylib
index cd3c0d1..17b0ee1 100755
--- a/darwin-x86/lib64/libc++abi.1.dylib
+++ b/darwin-x86/lib64/libc++abi.1.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libclang_cxx.dylib b/darwin-x86/lib64/libclang_cxx.dylib
index 1f2795f..65bf8cd 100755
--- a/darwin-x86/lib64/libclang_cxx.dylib
+++ b/darwin-x86/lib64/libclang_cxx.dylib
Binary files differ
diff --git a/darwin-x86/lib64/liblog.dylib b/darwin-x86/lib64/liblog.dylib
index d2aecee..4435076 100755
--- a/darwin-x86/lib64/liblog.dylib
+++ b/darwin-x86/lib64/liblog.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libprotobuf-cpp-full.dylib b/darwin-x86/lib64/libprotobuf-cpp-full.dylib
index 8bc37ec..3889d14 100755
--- a/darwin-x86/lib64/libprotobuf-cpp-full.dylib
+++ b/darwin-x86/lib64/libprotobuf-cpp-full.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libxml2.2.9.10.dylib b/darwin-x86/lib64/libxml2.2.9.10.dylib
index a8d1171..d93f11a 100755
--- a/darwin-x86/lib64/libxml2.2.9.10.dylib
+++ b/darwin-x86/lib64/libxml2.2.9.10.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libz-host.dylib b/darwin-x86/lib64/libz-host.dylib
index d8db66b..a6a2260 100755
--- a/darwin-x86/lib64/libz-host.dylib
+++ b/darwin-x86/lib64/libz-host.dylib
Binary files differ
diff --git a/darwin-x86/lib64/libziparchive.dylib b/darwin-x86/lib64/libziparchive.dylib
index b8fc965..3541cc8 100755
--- a/darwin-x86/lib64/libziparchive.dylib
+++ b/darwin-x86/lib64/libziparchive.dylib
Binary files differ
diff --git a/linux-x86/bin/cxx_extractor b/linux-x86/bin/cxx_extractor
index 890128c..24b18c8 100755
--- a/linux-x86/bin/cxx_extractor
+++ b/linux-x86/bin/cxx_extractor
Binary files differ
diff --git a/linux-x86/bin/header-abi-diff b/linux-x86/bin/header-abi-diff
index 3d8471a..9e01acf 100755
--- a/linux-x86/bin/header-abi-diff
+++ b/linux-x86/bin/header-abi-diff
Binary files differ
diff --git a/linux-x86/bin/header-abi-dumper b/linux-x86/bin/header-abi-dumper
index dd64c6c..0786ab3 100755
--- a/linux-x86/bin/header-abi-dumper
+++ b/linux-x86/bin/header-abi-dumper
Binary files differ
diff --git a/linux-x86/bin/header-abi-linker b/linux-x86/bin/header-abi-linker
index 618326c..7e1fbfa 100755
--- a/linux-x86/bin/header-abi-linker
+++ b/linux-x86/bin/header-abi-linker
Binary files differ
diff --git a/linux-x86/bin/proto_metadata_plugin b/linux-x86/bin/proto_metadata_plugin
index d6470e0..b5f7092 100755
--- a/linux-x86/bin/proto_metadata_plugin
+++ b/linux-x86/bin/proto_metadata_plugin
Binary files differ
diff --git a/linux-x86/bin/protoc_extractor b/linux-x86/bin/protoc_extractor
index 9178e95..75d6261 100755
--- a/linux-x86/bin/protoc_extractor
+++ b/linux-x86/bin/protoc_extractor
Binary files differ
diff --git a/linux-x86/bin/versioner b/linux-x86/bin/versioner
index 06e7cf5..a74b7f0 100755
--- a/linux-x86/bin/versioner
+++ b/linux-x86/bin/versioner
Binary files differ
diff --git a/linux-x86/clang-headers b/linux-x86/clang-headers
index e8f09e7..0005fe8 120000
--- a/linux-x86/clang-headers
+++ b/linux-x86/clang-headers
@@ -1 +1 @@
-lib64/clang/11.0.5/include
\ No newline at end of file
+lib64/clang/12.0.1/include
\ No newline at end of file
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_builtin_vars.h b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_builtin_vars.h
deleted file mode 100644
index 2ba1521..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_builtin_vars.h
+++ /dev/null
@@ -1,112 +0,0 @@
-/*===---- cuda_builtin_vars.h - CUDA built-in variables ---------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __CUDA_BUILTIN_VARS_H
-#define __CUDA_BUILTIN_VARS_H
-
-// Forward declares from vector_types.h.
-struct uint3;
-struct dim3;
-
-// The file implements built-in CUDA variables using __declspec(property).
-// https://msdn.microsoft.com/en-us/library/yhfk0thd.aspx
-// All read accesses of built-in variable fields get converted into calls to a
-// getter function which in turn calls the appropriate builtin to fetch the
-// value.
-//
-// Example:
-//    int x = threadIdx.x;
-// IR output:
-//  %0 = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() #3
-// PTX output:
-//  mov.u32     %r2, %tid.x;
-
-#define __CUDA_DEVICE_BUILTIN(FIELD, INTRINSIC)                                \
-  __declspec(property(get = __fetch_builtin_##FIELD)) unsigned int FIELD;      \
-  static inline __attribute__((always_inline))                                 \
-      __attribute__((device)) unsigned int __fetch_builtin_##FIELD(void) {     \
-    return INTRINSIC;                                                          \
-  }
-
-#if __cplusplus >= 201103L
-#define __DELETE =delete
-#else
-#define __DELETE
-#endif
-
-// Make sure nobody can create instances of the special variable types.  nvcc
-// also disallows taking address of special variables, so we disable address-of
-// operator as well.
-#define __CUDA_DISALLOW_BUILTINVAR_ACCESS(TypeName)                            \
-  __attribute__((device)) TypeName() __DELETE;                                 \
-  __attribute__((device)) TypeName(const TypeName &) __DELETE;                 \
-  __attribute__((device)) void operator=(const TypeName &) const __DELETE;     \
-  __attribute__((device)) TypeName *operator&() const __DELETE
-
-struct __cuda_builtin_threadIdx_t {
-  __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_tid_x());
-  __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_tid_y());
-  __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_tid_z());
-  // threadIdx should be convertible to uint3 (in fact in nvcc, it *is* a
-  // uint3).  This function is defined after we pull in vector_types.h.
-  __attribute__((device)) operator uint3() const;
-private:
-  __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_threadIdx_t);
-};
-
-struct __cuda_builtin_blockIdx_t {
-  __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_ctaid_x());
-  __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_ctaid_y());
-  __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_ctaid_z());
-  // blockIdx should be convertible to uint3 (in fact in nvcc, it *is* a
-  // uint3).  This function is defined after we pull in vector_types.h.
-  __attribute__((device)) operator uint3() const;
-private:
-  __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockIdx_t);
-};
-
-struct __cuda_builtin_blockDim_t {
-  __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_ntid_x());
-  __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_ntid_y());
-  __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_ntid_z());
-  // blockDim should be convertible to dim3 (in fact in nvcc, it *is* a
-  // dim3).  This function is defined after we pull in vector_types.h.
-  __attribute__((device)) operator dim3() const;
-private:
-  __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockDim_t);
-};
-
-struct __cuda_builtin_gridDim_t {
-  __CUDA_DEVICE_BUILTIN(x,__nvvm_read_ptx_sreg_nctaid_x());
-  __CUDA_DEVICE_BUILTIN(y,__nvvm_read_ptx_sreg_nctaid_y());
-  __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_nctaid_z());
-  // gridDim should be convertible to dim3 (in fact in nvcc, it *is* a
-  // dim3).  This function is defined after we pull in vector_types.h.
-  __attribute__((device)) operator dim3() const;
-private:
-  __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_gridDim_t);
-};
-
-#define __CUDA_BUILTIN_VAR                                                     \
-  extern const __attribute__((device)) __attribute__((weak))
-__CUDA_BUILTIN_VAR __cuda_builtin_threadIdx_t threadIdx;
-__CUDA_BUILTIN_VAR __cuda_builtin_blockIdx_t blockIdx;
-__CUDA_BUILTIN_VAR __cuda_builtin_blockDim_t blockDim;
-__CUDA_BUILTIN_VAR __cuda_builtin_gridDim_t gridDim;
-
-// warpSize should translate to read of %WARP_SZ but there's currently no
-// builtin to do so. According to PTX v4.2 docs 'to date, all target
-// architectures have a WARP_SZ value of 32'.
-__attribute__((device)) const int warpSize = 32;
-
-#undef __CUDA_DEVICE_BUILTIN
-#undef __CUDA_BUILTIN_VAR
-#undef __CUDA_DISALLOW_BUILTINVAR_ACCESS
-
-#endif /* __CUDA_BUILTIN_VARS_H */
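
The deleted __clang_cuda_builtin_vars.h above builds threadIdx and friends out of __declspec(property) getters, so that a plain field read compiles into an intrinsic call. Below is a hedged host-side analogue, ours rather than the header's code; it assumes clang++ with -fdeclspec (clang enables this Microsoft extension automatically in CUDA mode, which is what the real header relies on).

// property_sketch.cpp -- illustrative analogue; names are ours.
// Build (assumption): clang++ -fdeclspec property_sketch.cpp
#include <cstdio>

struct fake_threadIdx_t {
  // Reads of .x are rewritten by the compiler into fetch_x() calls -- the
  // same trick the header uses to turn threadIdx.x into a call of
  // __nvvm_read_ptx_sreg_tid_x().
  __declspec(property(get = fetch_x)) unsigned int x;
  static unsigned int fetch_x() { return 42; } // stand-in for the intrinsic
};

int main() {
  fake_threadIdx_t threadIdx;
  unsigned int v = threadIdx.x; // actually calls fake_threadIdx_t::fetch_x()
  printf("%u\n", v);            // prints 42
  return 0;
}
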
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_cmath.h b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_cmath.h
deleted file mode 100644
index 8ba1826..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_cmath.h
+++ /dev/null
@@ -1,470 +0,0 @@
-/*===---- __clang_cuda_cmath.h - Device-side CUDA cmath support ------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __CLANG_CUDA_CMATH_H__
-#define __CLANG_CUDA_CMATH_H__
-#ifndef __CUDA__
-#error "This file is for CUDA compilation only."
-#endif
-
-#ifndef __OPENMP_NVPTX__
-#include <limits>
-#endif
-
-// CUDA lets us use various std math functions on the device side.  This file
-// works in concert with __clang_cuda_math_forward_declares.h to make this work.
-//
-// Specifically, the forward-declares header declares __device__ overloads for
-// these functions in the global namespace, then pulls them into namespace std
-// with 'using' statements.  Then this file implements those functions, after
-// their implementations have been pulled in.
-//
-// It's important that we declare the functions in the global namespace and pull
-// them into namespace std with using statements, as opposed to simply declaring
-// these functions in namespace std, because our device functions need to
-// overload the standard library functions, which may be declared in the global
-// namespace or in std, depending on the degree of conformance of the stdlib
-// implementation.  Declaring in the global namespace and pulling into namespace
-// std covers all of the known knowns.
-
-#ifdef __OPENMP_NVPTX__
-#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
-#else
-#define __DEVICE__ static __device__ __inline__ __attribute__((always_inline))
-#endif
-
-__DEVICE__ long long abs(long long __n) { return ::llabs(__n); }
-__DEVICE__ long abs(long __n) { return ::labs(__n); }
-__DEVICE__ float abs(float __x) { return ::fabsf(__x); }
-__DEVICE__ double abs(double __x) { return ::fabs(__x); }
-__DEVICE__ float acos(float __x) { return ::acosf(__x); }
-__DEVICE__ float asin(float __x) { return ::asinf(__x); }
-__DEVICE__ float atan(float __x) { return ::atanf(__x); }
-__DEVICE__ float atan2(float __x, float __y) { return ::atan2f(__x, __y); }
-__DEVICE__ float ceil(float __x) { return ::ceilf(__x); }
-__DEVICE__ float cos(float __x) { return ::cosf(__x); }
-__DEVICE__ float cosh(float __x) { return ::coshf(__x); }
-__DEVICE__ float exp(float __x) { return ::expf(__x); }
-__DEVICE__ float fabs(float __x) { return ::fabsf(__x); }
-__DEVICE__ float floor(float __x) { return ::floorf(__x); }
-__DEVICE__ float fmod(float __x, float __y) { return ::fmodf(__x, __y); }
-__DEVICE__ int fpclassify(float __x) {
-  return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
-                              FP_ZERO, __x);
-}
-__DEVICE__ int fpclassify(double __x) {
-  return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
-                              FP_ZERO, __x);
-}
-__DEVICE__ float frexp(float __arg, int *__exp) {
-  return ::frexpf(__arg, __exp);
-}
-
-// For inscrutable reasons, the CUDA headers define these functions for us on
-// Windows. For OpenMP we omit these as some old system headers have
-// non-conforming `isinf(float)` and `isnan(float)` implementations that return
-// an `int`. The system versions of these functions should be fine anyway.
-#if !defined(_MSC_VER) && !defined(__OPENMP_NVPTX__)
-__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
-__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
-__DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
-// For inscrutable reasons, __finite(), the double-precision version of
-// __finitef, does not exist when compiling for MacOS.  __isfinited is available
-// everywhere and is just as good.
-__DEVICE__ bool isfinite(double __x) { return ::__isfinited(__x); }
-__DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); }
-__DEVICE__ bool isnan(double __x) { return ::__isnan(__x); }
-#endif
-
-__DEVICE__ bool isgreater(float __x, float __y) {
-  return __builtin_isgreater(__x, __y);
-}
-__DEVICE__ bool isgreater(double __x, double __y) {
-  return __builtin_isgreater(__x, __y);
-}
-__DEVICE__ bool isgreaterequal(float __x, float __y) {
-  return __builtin_isgreaterequal(__x, __y);
-}
-__DEVICE__ bool isgreaterequal(double __x, double __y) {
-  return __builtin_isgreaterequal(__x, __y);
-}
-__DEVICE__ bool isless(float __x, float __y) {
-  return __builtin_isless(__x, __y);
-}
-__DEVICE__ bool isless(double __x, double __y) {
-  return __builtin_isless(__x, __y);
-}
-__DEVICE__ bool islessequal(float __x, float __y) {
-  return __builtin_islessequal(__x, __y);
-}
-__DEVICE__ bool islessequal(double __x, double __y) {
-  return __builtin_islessequal(__x, __y);
-}
-__DEVICE__ bool islessgreater(float __x, float __y) {
-  return __builtin_islessgreater(__x, __y);
-}
-__DEVICE__ bool islessgreater(double __x, double __y) {
-  return __builtin_islessgreater(__x, __y);
-}
-__DEVICE__ bool isnormal(float __x) { return __builtin_isnormal(__x); }
-__DEVICE__ bool isnormal(double __x) { return __builtin_isnormal(__x); }
-__DEVICE__ bool isunordered(float __x, float __y) {
-  return __builtin_isunordered(__x, __y);
-}
-__DEVICE__ bool isunordered(double __x, double __y) {
-  return __builtin_isunordered(__x, __y);
-}
-__DEVICE__ float ldexp(float __arg, int __exp) {
-  return ::ldexpf(__arg, __exp);
-}
-__DEVICE__ float log(float __x) { return ::logf(__x); }
-__DEVICE__ float log10(float __x) { return ::log10f(__x); }
-__DEVICE__ float modf(float __x, float *__iptr) { return ::modff(__x, __iptr); }
-__DEVICE__ float pow(float __base, float __exp) {
-  return ::powf(__base, __exp);
-}
-__DEVICE__ float pow(float __base, int __iexp) {
-  return ::powif(__base, __iexp);
-}
-__DEVICE__ double pow(double __base, int __iexp) {
-  return ::powi(__base, __iexp);
-}
-__DEVICE__ bool signbit(float __x) { return ::__signbitf(__x); }
-__DEVICE__ bool signbit(double __x) { return ::__signbitd(__x); }
-__DEVICE__ float sin(float __x) { return ::sinf(__x); }
-__DEVICE__ float sinh(float __x) { return ::sinhf(__x); }
-__DEVICE__ float sqrt(float __x) { return ::sqrtf(__x); }
-__DEVICE__ float tan(float __x) { return ::tanf(__x); }
-__DEVICE__ float tanh(float __x) { return ::tanhf(__x); }
-
-// Notably missing above is nexttoward.  We omit it because
-// libdevice doesn't provide an implementation, and we don't want to be in the
-// business of implementing tricky libm functions in this header.
-
-#ifndef __OPENMP_NVPTX__
-
-// Now we've defined everything we promised we'd define in
-// __clang_cuda_math_forward_declares.h.  We need to do two additional things to
-// fix up our math functions.
-//
-// 1) Define __device__ overloads for e.g. sin(int).  The CUDA headers define
-//    only sin(float) and sin(double), which means that e.g. sin(0) is
-//    ambiguous.
-//
-// 2) Pull the __device__ overloads of "foobarf" math functions into namespace
-//    std.  These are defined in the CUDA headers in the global namespace,
-//    independent of everything else we've done here.
-
-// We can't use std::enable_if, because we want to be pre-C++11 compatible.  But
-// we go ahead and unconditionally define functions that are only available when
-// compiling for C++11 to match the behavior of the CUDA headers.
-template<bool __B, class __T = void>
-struct __clang_cuda_enable_if {};
-
-template <class __T> struct __clang_cuda_enable_if<true, __T> {
-  typedef __T type;
-};
-
-// Defines an overload of __fn that accepts one integral argument, calls
-// __fn((double)x), and returns __retty.
-#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_1(__retty, __fn)                      \
-  template <typename __T>                                                      \
-  __DEVICE__                                                                   \
-      typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,    \
-                                      __retty>::type                           \
-      __fn(__T __x) {                                                          \
-    return ::__fn((double)__x);                                                \
-  }
-
-// Defines an overload of __fn that accepts two arithmetic arguments, calls
-// __fn((double)x, (double)y), and returns a double.
-//
-// Note this is different from OVERLOAD_1, which generates an overload that
-// accepts only *integral* arguments.
-#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_2(__retty, __fn)                      \
-  template <typename __T1, typename __T2>                                      \
-  __DEVICE__ typename __clang_cuda_enable_if<                                  \
-      std::numeric_limits<__T1>::is_specialized &&                             \
-          std::numeric_limits<__T2>::is_specialized,                           \
-      __retty>::type                                                           \
-  __fn(__T1 __x, __T2 __y) {                                                   \
-    return __fn((double)__x, (double)__y);                                     \
-  }
-
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, acos)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, acosh)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, asin)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, asinh)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, atan)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, atan2);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, atanh)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cbrt)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, ceil)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, copysign);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cos)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cosh)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, erf)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, erfc)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, exp)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, exp2)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, expm1)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, fabs)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fdim);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, floor)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmax);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmin);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmod);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(int, fpclassify)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, hypot);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(int, ilogb)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isfinite)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isgreater);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isgreaterequal);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isinf);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isless);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, islessequal);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, islessgreater);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isnan);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isnormal)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isunordered);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, lgamma)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log10)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log1p)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log2)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, logb)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long long, llrint)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long long, llround)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long, lrint)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long, lround)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, nearbyint);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, nextafter);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, pow);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, remainder);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, rint);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, round);
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, signbit)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sin)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sinh)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sqrt)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tan)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tanh)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tgamma)
-__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, trunc);
-
-#undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_1
-#undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_2
-
-// Overloads for functions that don't match the patterns expected by
-// __CUDA_CLANG_FN_INTEGER_OVERLOAD_{1,2}.
-template <typename __T1, typename __T2, typename __T3>
-__DEVICE__ typename __clang_cuda_enable_if<
-    std::numeric_limits<__T1>::is_specialized &&
-        std::numeric_limits<__T2>::is_specialized &&
-        std::numeric_limits<__T3>::is_specialized,
-    double>::type
-fma(__T1 __x, __T2 __y, __T3 __z) {
-  return std::fma((double)__x, (double)__y, (double)__z);
-}
-
-template <typename __T>
-__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
-                                           double>::type
-frexp(__T __x, int *__exp) {
-  return std::frexp((double)__x, __exp);
-}
-
-template <typename __T>
-__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
-                                           double>::type
-ldexp(__T __x, int __exp) {
-  return std::ldexp((double)__x, __exp);
-}
-
-template <typename __T1, typename __T2>
-__DEVICE__ typename __clang_cuda_enable_if<
-    std::numeric_limits<__T1>::is_specialized &&
-        std::numeric_limits<__T2>::is_specialized,
-    double>::type
-remquo(__T1 __x, __T2 __y, int *__quo) {
-  return std::remquo((double)__x, (double)__y, __quo);
-}
-
-template <typename __T>
-__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
-                                           double>::type
-scalbln(__T __x, long __exp) {
-  return std::scalbln((double)__x, __exp);
-}
-
-template <typename __T>
-__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
-                                           double>::type
-scalbn(__T __x, int __exp) {
-  return std::scalbn((double)__x, __exp);
-}
-
-// We need to define these overloads in exactly the namespace our standard
-// library uses (including the right inline namespace), otherwise they won't be
-// picked up by other functions in the standard library (e.g. functions in
-// <complex>).  Thus the ugliness below.
-#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
-_LIBCPP_BEGIN_NAMESPACE_STD
-#else
-namespace std {
-#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
-_GLIBCXX_BEGIN_NAMESPACE_VERSION
-#endif
-#endif
-
-// Pull the new overloads we defined above into namespace std.
-using ::acos;
-using ::acosh;
-using ::asin;
-using ::asinh;
-using ::atan;
-using ::atan2;
-using ::atanh;
-using ::cbrt;
-using ::ceil;
-using ::copysign;
-using ::cos;
-using ::cosh;
-using ::erf;
-using ::erfc;
-using ::exp;
-using ::exp2;
-using ::expm1;
-using ::fabs;
-using ::fdim;
-using ::floor;
-using ::fma;
-using ::fmax;
-using ::fmin;
-using ::fmod;
-using ::fpclassify;
-using ::frexp;
-using ::hypot;
-using ::ilogb;
-using ::isfinite;
-using ::isgreater;
-using ::isgreaterequal;
-using ::isless;
-using ::islessequal;
-using ::islessgreater;
-using ::isnormal;
-using ::isunordered;
-using ::ldexp;
-using ::lgamma;
-using ::llrint;
-using ::llround;
-using ::log;
-using ::log10;
-using ::log1p;
-using ::log2;
-using ::logb;
-using ::lrint;
-using ::lround;
-using ::nearbyint;
-using ::nextafter;
-using ::pow;
-using ::remainder;
-using ::remquo;
-using ::rint;
-using ::round;
-using ::scalbln;
-using ::scalbn;
-using ::signbit;
-using ::sin;
-using ::sinh;
-using ::sqrt;
-using ::tan;
-using ::tanh;
-using ::tgamma;
-using ::trunc;
-
-// Well this is fun: We need to pull these symbols in for libc++, but we can't
-// pull them in with libstdc++, because its ::isinf and ::isnan are different
-// than its std::isinf and std::isnan.
-#ifndef __GLIBCXX__
-using ::isinf;
-using ::isnan;
-#endif
-
-// Finally, pull the "foobarf" functions that CUDA defines in its headers into
-// namespace std.
-using ::acosf;
-using ::acoshf;
-using ::asinf;
-using ::asinhf;
-using ::atan2f;
-using ::atanf;
-using ::atanhf;
-using ::cbrtf;
-using ::ceilf;
-using ::copysignf;
-using ::cosf;
-using ::coshf;
-using ::erfcf;
-using ::erff;
-using ::exp2f;
-using ::expf;
-using ::expm1f;
-using ::fabsf;
-using ::fdimf;
-using ::floorf;
-using ::fmaf;
-using ::fmaxf;
-using ::fminf;
-using ::fmodf;
-using ::frexpf;
-using ::hypotf;
-using ::ilogbf;
-using ::ldexpf;
-using ::lgammaf;
-using ::llrintf;
-using ::llroundf;
-using ::log10f;
-using ::log1pf;
-using ::log2f;
-using ::logbf;
-using ::logf;
-using ::lrintf;
-using ::lroundf;
-using ::modff;
-using ::nearbyintf;
-using ::nextafterf;
-using ::powf;
-using ::remainderf;
-using ::remquof;
-using ::rintf;
-using ::roundf;
-using ::scalblnf;
-using ::scalbnf;
-using ::sinf;
-using ::sinhf;
-using ::sqrtf;
-using ::tanf;
-using ::tanhf;
-using ::tgammaf;
-using ::truncf;
-
-#ifdef _LIBCPP_END_NAMESPACE_STD
-_LIBCPP_END_NAMESPACE_STD
-#else
-#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
-_GLIBCXX_END_NAMESPACE_VERSION
-#endif
-} // namespace std
-#endif
-
-#endif // __OPENMP_NVPTX__
-
-#undef __DEVICE__
-
-#endif
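
The core trick in the deleted __clang_cuda_cmath.h is its pre-C++11 enable_if (__clang_cuda_enable_if) keyed on std::numeric_limits<T>::is_integer, which generates e.g. a sin(int) overload so that sin(0) is no longer ambiguous between sin(float) and sin(double). A minimal host-side sketch of the same pattern, with our own names, follows:

// enable_if_sketch.cpp -- illustrative only; names are ours.
#include <cmath>
#include <cstdio>
#include <limits>

template <bool B, class T = void> struct enable_if_ {};
template <class T> struct enable_if_<true, T> { typedef T type; };

// Participates in overload resolution only for integer T: for other types,
// substitution fails silently and the overload disappears (SFINAE).
template <typename T>
typename enable_if_<std::numeric_limits<T>::is_integer, double>::type
my_sin(T x) {
  return std::sin((double)x);
}

double my_sin(double x) { return std::sin(x); }
float my_sin(float x) { return std::sin(x); }

int main() {
  printf("%f\n", my_sin(0));   // integer argument -> template overload
  printf("%f\n", my_sin(0.5)); // double overload, template is SFINAE'd out
  return 0;
}
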
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_math.h b/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_math.h
deleted file mode 100644
index 332e616..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_math.h
+++ /dev/null
@@ -1,347 +0,0 @@
-/*===---- __clang_cuda_math.h - Device-side CUDA math support --------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-#ifndef __CLANG_CUDA_MATH_H__
-#define __CLANG_CUDA_MATH_H__
-#ifndef __CUDA__
-#error "This file is for CUDA compilation only."
-#endif
-
-#ifndef __OPENMP_NVPTX__
-#if CUDA_VERSION < 9000
-#error This file is intended to be used with CUDA-9+ only.
-#endif
-#endif
-
-// __DEVICE__ is a helper macro with a common set of attributes for the wrappers
-// we implement in this file. We need static in order to avoid emitting unused
-// functions and __forceinline__ helps inlining these wrappers at -O1.
-#pragma push_macro("__DEVICE__")
-#ifdef __OPENMP_NVPTX__
-#if defined(__cplusplus)
-#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
-#else
-#define __DEVICE__ static __attribute__((always_inline, nothrow))
-#endif
-#else
-#define __DEVICE__ static __device__ __forceinline__
-#endif
-
-// Specialized version of __DEVICE__ for functions with void return type. Needed
-// because the OpenMP overlay requires constexpr functions here but prior to
-// C++14 void-returning functions could not be constexpr.
-#pragma push_macro("__DEVICE_VOID__")
-#if defined(__OPENMP_NVPTX__) && defined(__cplusplus) && __cplusplus < 201402L
-#define __DEVICE_VOID__ static __attribute__((always_inline, nothrow))
-#else
-#define __DEVICE_VOID__ __DEVICE__
-#endif
-
-// libdevice provides fast low-precision and slow full-precision implementations
-// for some functions. Which one gets selected depends on
-// __CLANG_CUDA_APPROX_TRANSCENDENTALS__ which gets defined by clang if
-// -ffast-math or -fcuda-approx-transcendentals are in effect.
-#pragma push_macro("__FAST_OR_SLOW")
-#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
-#define __FAST_OR_SLOW(fast, slow) fast
-#else
-#define __FAST_OR_SLOW(fast, slow) slow
-#endif
-
-__DEVICE__ int abs(int __a) { return __nv_abs(__a); }
-__DEVICE__ double fabs(double __a) { return __nv_fabs(__a); }
-__DEVICE__ double acos(double __a) { return __nv_acos(__a); }
-__DEVICE__ float acosf(float __a) { return __nv_acosf(__a); }
-__DEVICE__ double acosh(double __a) { return __nv_acosh(__a); }
-__DEVICE__ float acoshf(float __a) { return __nv_acoshf(__a); }
-__DEVICE__ double asin(double __a) { return __nv_asin(__a); }
-__DEVICE__ float asinf(float __a) { return __nv_asinf(__a); }
-__DEVICE__ double asinh(double __a) { return __nv_asinh(__a); }
-__DEVICE__ float asinhf(float __a) { return __nv_asinhf(__a); }
-__DEVICE__ double atan(double __a) { return __nv_atan(__a); }
-__DEVICE__ double atan2(double __a, double __b) { return __nv_atan2(__a, __b); }
-__DEVICE__ float atan2f(float __a, float __b) { return __nv_atan2f(__a, __b); }
-__DEVICE__ float atanf(float __a) { return __nv_atanf(__a); }
-__DEVICE__ double atanh(double __a) { return __nv_atanh(__a); }
-__DEVICE__ float atanhf(float __a) { return __nv_atanhf(__a); }
-__DEVICE__ double cbrt(double __a) { return __nv_cbrt(__a); }
-__DEVICE__ float cbrtf(float __a) { return __nv_cbrtf(__a); }
-__DEVICE__ double ceil(double __a) { return __nv_ceil(__a); }
-__DEVICE__ float ceilf(float __a) { return __nv_ceilf(__a); }
-__DEVICE__ double copysign(double __a, double __b) {
-  return __nv_copysign(__a, __b);
-}
-__DEVICE__ float copysignf(float __a, float __b) {
-  return __nv_copysignf(__a, __b);
-}
-__DEVICE__ double cos(double __a) { return __nv_cos(__a); }
-__DEVICE__ float cosf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_cosf, __nv_cosf)(__a);
-}
-__DEVICE__ double cosh(double __a) { return __nv_cosh(__a); }
-__DEVICE__ float coshf(float __a) { return __nv_coshf(__a); }
-__DEVICE__ double cospi(double __a) { return __nv_cospi(__a); }
-__DEVICE__ float cospif(float __a) { return __nv_cospif(__a); }
-__DEVICE__ double cyl_bessel_i0(double __a) { return __nv_cyl_bessel_i0(__a); }
-__DEVICE__ float cyl_bessel_i0f(float __a) { return __nv_cyl_bessel_i0f(__a); }
-__DEVICE__ double cyl_bessel_i1(double __a) { return __nv_cyl_bessel_i1(__a); }
-__DEVICE__ float cyl_bessel_i1f(float __a) { return __nv_cyl_bessel_i1f(__a); }
-__DEVICE__ double erf(double __a) { return __nv_erf(__a); }
-__DEVICE__ double erfc(double __a) { return __nv_erfc(__a); }
-__DEVICE__ float erfcf(float __a) { return __nv_erfcf(__a); }
-__DEVICE__ double erfcinv(double __a) { return __nv_erfcinv(__a); }
-__DEVICE__ float erfcinvf(float __a) { return __nv_erfcinvf(__a); }
-__DEVICE__ double erfcx(double __a) { return __nv_erfcx(__a); }
-__DEVICE__ float erfcxf(float __a) { return __nv_erfcxf(__a); }
-__DEVICE__ float erff(float __a) { return __nv_erff(__a); }
-__DEVICE__ double erfinv(double __a) { return __nv_erfinv(__a); }
-__DEVICE__ float erfinvf(float __a) { return __nv_erfinvf(__a); }
-__DEVICE__ double exp(double __a) { return __nv_exp(__a); }
-__DEVICE__ double exp10(double __a) { return __nv_exp10(__a); }
-__DEVICE__ float exp10f(float __a) { return __nv_exp10f(__a); }
-__DEVICE__ double exp2(double __a) { return __nv_exp2(__a); }
-__DEVICE__ float exp2f(float __a) { return __nv_exp2f(__a); }
-__DEVICE__ float expf(float __a) { return __nv_expf(__a); }
-__DEVICE__ double expm1(double __a) { return __nv_expm1(__a); }
-__DEVICE__ float expm1f(float __a) { return __nv_expm1f(__a); }
-__DEVICE__ float fabsf(float __a) { return __nv_fabsf(__a); }
-__DEVICE__ double fdim(double __a, double __b) { return __nv_fdim(__a, __b); }
-__DEVICE__ float fdimf(float __a, float __b) { return __nv_fdimf(__a, __b); }
-__DEVICE__ double fdivide(double __a, double __b) { return __a / __b; }
-__DEVICE__ float fdividef(float __a, float __b) {
-#if __FAST_MATH__ && !__CUDA_PREC_DIV
-  return __nv_fast_fdividef(__a, __b);
-#else
-  return __a / __b;
-#endif
-}
-__DEVICE__ double floor(double __f) { return __nv_floor(__f); }
-__DEVICE__ float floorf(float __f) { return __nv_floorf(__f); }
-__DEVICE__ double fma(double __a, double __b, double __c) {
-  return __nv_fma(__a, __b, __c);
-}
-__DEVICE__ float fmaf(float __a, float __b, float __c) {
-  return __nv_fmaf(__a, __b, __c);
-}
-__DEVICE__ double fmax(double __a, double __b) { return __nv_fmax(__a, __b); }
-__DEVICE__ float fmaxf(float __a, float __b) { return __nv_fmaxf(__a, __b); }
-__DEVICE__ double fmin(double __a, double __b) { return __nv_fmin(__a, __b); }
-__DEVICE__ float fminf(float __a, float __b) { return __nv_fminf(__a, __b); }
-__DEVICE__ double fmod(double __a, double __b) { return __nv_fmod(__a, __b); }
-__DEVICE__ float fmodf(float __a, float __b) { return __nv_fmodf(__a, __b); }
-__DEVICE__ double frexp(double __a, int *__b) { return __nv_frexp(__a, __b); }
-__DEVICE__ float frexpf(float __a, int *__b) { return __nv_frexpf(__a, __b); }
-__DEVICE__ double hypot(double __a, double __b) { return __nv_hypot(__a, __b); }
-__DEVICE__ float hypotf(float __a, float __b) { return __nv_hypotf(__a, __b); }
-__DEVICE__ int ilogb(double __a) { return __nv_ilogb(__a); }
-__DEVICE__ int ilogbf(float __a) { return __nv_ilogbf(__a); }
-__DEVICE__ double j0(double __a) { return __nv_j0(__a); }
-__DEVICE__ float j0f(float __a) { return __nv_j0f(__a); }
-__DEVICE__ double j1(double __a) { return __nv_j1(__a); }
-__DEVICE__ float j1f(float __a) { return __nv_j1f(__a); }
-__DEVICE__ double jn(int __n, double __a) { return __nv_jn(__n, __a); }
-__DEVICE__ float jnf(int __n, float __a) { return __nv_jnf(__n, __a); }
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long labs(long __a) { return __nv_llabs(__a); };
-#else
-__DEVICE__ long labs(long __a) { return __nv_abs(__a); };
-#endif
-__DEVICE__ double ldexp(double __a, int __b) { return __nv_ldexp(__a, __b); }
-__DEVICE__ float ldexpf(float __a, int __b) { return __nv_ldexpf(__a, __b); }
-__DEVICE__ double lgamma(double __a) { return __nv_lgamma(__a); }
-__DEVICE__ float lgammaf(float __a) { return __nv_lgammaf(__a); }
-__DEVICE__ long long llabs(long long __a) { return __nv_llabs(__a); }
-__DEVICE__ long long llmax(long long __a, long long __b) {
-  return __nv_llmax(__a, __b);
-}
-__DEVICE__ long long llmin(long long __a, long long __b) {
-  return __nv_llmin(__a, __b);
-}
-__DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); }
-__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); }
-__DEVICE__ long long llround(double __a) { return __nv_llround(__a); }
-__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); }
-__DEVICE__ double log(double __a) { return __nv_log(__a); }
-__DEVICE__ double log10(double __a) { return __nv_log10(__a); }
-__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); }
-__DEVICE__ double log1p(double __a) { return __nv_log1p(__a); }
-__DEVICE__ float log1pf(float __a) { return __nv_log1pf(__a); }
-__DEVICE__ double log2(double __a) { return __nv_log2(__a); }
-__DEVICE__ float log2f(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_log2f, __nv_log2f)(__a);
-}
-__DEVICE__ double logb(double __a) { return __nv_logb(__a); }
-__DEVICE__ float logbf(float __a) { return __nv_logbf(__a); }
-__DEVICE__ float logf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_logf, __nv_logf)(__a);
-}
-#if defined(__LP64__) || defined(_WIN64)
-__DEVICE__ long lrint(double __a) { return llrint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2ll_rn(__a); }
-__DEVICE__ long lround(double __a) { return llround(__a); }
-__DEVICE__ long lroundf(float __a) { return llroundf(__a); }
-#else
-__DEVICE__ long lrint(double __a) { return (long)rint(__a); }
-__DEVICE__ long lrintf(float __a) { return __float2int_rn(__a); }
-__DEVICE__ long lround(double __a) { return round(__a); }
-__DEVICE__ long lroundf(float __a) { return roundf(__a); }
-#endif
-__DEVICE__ int max(int __a, int __b) { return __nv_max(__a, __b); }
-__DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
-__DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
-__DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
-__DEVICE__ double nearbyint(double __a) { return __nv_nearbyint(__a); }
-__DEVICE__ float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
-__DEVICE__ double nextafter(double __a, double __b) {
-  return __nv_nextafter(__a, __b);
-}
-__DEVICE__ float nextafterf(float __a, float __b) {
-  return __nv_nextafterf(__a, __b);
-}
-__DEVICE__ double norm(int __dim, const double *__t) {
-  return __nv_norm(__dim, __t);
-}
-__DEVICE__ double norm3d(double __a, double __b, double __c) {
-  return __nv_norm3d(__a, __b, __c);
-}
-__DEVICE__ float norm3df(float __a, float __b, float __c) {
-  return __nv_norm3df(__a, __b, __c);
-}
-__DEVICE__ double norm4d(double __a, double __b, double __c, double __d) {
-  return __nv_norm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float norm4df(float __a, float __b, float __c, float __d) {
-  return __nv_norm4df(__a, __b, __c, __d);
-}
-__DEVICE__ double normcdf(double __a) { return __nv_normcdf(__a); }
-__DEVICE__ float normcdff(float __a) { return __nv_normcdff(__a); }
-__DEVICE__ double normcdfinv(double __a) { return __nv_normcdfinv(__a); }
-__DEVICE__ float normcdfinvf(float __a) { return __nv_normcdfinvf(__a); }
-__DEVICE__ float normf(int __dim, const float *__t) {
-  return __nv_normf(__dim, __t);
-}
-__DEVICE__ double pow(double __a, double __b) { return __nv_pow(__a, __b); }
-__DEVICE__ float powf(float __a, float __b) { return __nv_powf(__a, __b); }
-__DEVICE__ double powi(double __a, int __b) { return __nv_powi(__a, __b); }
-__DEVICE__ float powif(float __a, int __b) { return __nv_powif(__a, __b); }
-__DEVICE__ double rcbrt(double __a) { return __nv_rcbrt(__a); }
-__DEVICE__ float rcbrtf(float __a) { return __nv_rcbrtf(__a); }
-__DEVICE__ double remainder(double __a, double __b) {
-  return __nv_remainder(__a, __b);
-}
-__DEVICE__ float remainderf(float __a, float __b) {
-  return __nv_remainderf(__a, __b);
-}
-__DEVICE__ double remquo(double __a, double __b, int *__c) {
-  return __nv_remquo(__a, __b, __c);
-}
-__DEVICE__ float remquof(float __a, float __b, int *__c) {
-  return __nv_remquof(__a, __b, __c);
-}
-__DEVICE__ double rhypot(double __a, double __b) {
-  return __nv_rhypot(__a, __b);
-}
-__DEVICE__ float rhypotf(float __a, float __b) {
-  return __nv_rhypotf(__a, __b);
-}
-__DEVICE__ double rint(double __a) { return __nv_rint(__a); }
-__DEVICE__ float rintf(float __a) { return __nv_rintf(__a); }
-__DEVICE__ double rnorm(int __a, const double *__b) {
-  return __nv_rnorm(__a, __b);
-}
-__DEVICE__ double rnorm3d(double __a, double __b, double __c) {
-  return __nv_rnorm3d(__a, __b, __c);
-}
-__DEVICE__ float rnorm3df(float __a, float __b, float __c) {
-  return __nv_rnorm3df(__a, __b, __c);
-}
-__DEVICE__ double rnorm4d(double __a, double __b, double __c, double __d) {
-  return __nv_rnorm4d(__a, __b, __c, __d);
-}
-__DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) {
-  return __nv_rnorm4df(__a, __b, __c, __d);
-}
-__DEVICE__ float rnormf(int __dim, const float *__t) {
-  return __nv_rnormf(__dim, __t);
-}
-__DEVICE__ double round(double __a) { return __nv_round(__a); }
-__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }
-__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }
-__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
-__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
-__DEVICE__ float scalbnf(float __a, int __b) { return __nv_scalbnf(__a, __b); }
-__DEVICE__ double scalbln(double __a, long __b) {
-  if (__b > INT_MAX)
-    return __a > 0 ? HUGE_VAL : -HUGE_VAL;
-  if (__b < INT_MIN)
-    return __a > 0 ? 0.0 : -0.0;
-  return scalbn(__a, (int)__b);
-}
-__DEVICE__ float scalblnf(float __a, long __b) {
-  if (__b > INT_MAX)
-    return __a > 0 ? HUGE_VALF : -HUGE_VALF;
-  if (__b < INT_MIN)
-    return __a > 0 ? 0.f : -0.f;
-  return scalbnf(__a, (int)__b);
-}
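-
-// Note (illustrative, not part of the original header): scalbln/scalblnf
-// above first clamp the long exponent into int range: an exponent above
-// INT_MAX saturates to +/-HUGE_VAL(F) (overflow) and one below INT_MIN to a
-// signed zero (underflow), matching what scalbn/scalbnf would return for
-// such extreme scale factors.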
-__DEVICE__ double sin(double __a) { return __nv_sin(__a); }
-__DEVICE_VOID__ void sincos(double __a, double *__s, double *__c) {
-  return __nv_sincos(__a, __s, __c);
-}
-__DEVICE_VOID__ void sincosf(float __a, float *__s, float *__c) {
-  return __FAST_OR_SLOW(__nv_fast_sincosf, __nv_sincosf)(__a, __s, __c);
-}
-__DEVICE_VOID__ void sincospi(double __a, double *__s, double *__c) {
-  return __nv_sincospi(__a, __s, __c);
-}
-__DEVICE_VOID__ void sincospif(float __a, float *__s, float *__c) {
-  return __nv_sincospif(__a, __s, __c);
-}
-__DEVICE__ float sinf(float __a) {
-  return __FAST_OR_SLOW(__nv_fast_sinf, __nv_sinf)(__a);
-}
-__DEVICE__ double sinh(double __a) { return __nv_sinh(__a); }
-__DEVICE__ float sinhf(float __a) { return __nv_sinhf(__a); }
-__DEVICE__ double sinpi(double __a) { return __nv_sinpi(__a); }
-__DEVICE__ float sinpif(float __a) { return __nv_sinpif(__a); }
-__DEVICE__ double sqrt(double __a) { return __nv_sqrt(__a); }
-__DEVICE__ float sqrtf(float __a) { return __nv_sqrtf(__a); }
-__DEVICE__ double tan(double __a) { return __nv_tan(__a); }
-__DEVICE__ float tanf(float __a) { return __nv_tanf(__a); }
-__DEVICE__ double tanh(double __a) { return __nv_tanh(__a); }
-__DEVICE__ float tanhf(float __a) { return __nv_tanhf(__a); }
-__DEVICE__ double tgamma(double __a) { return __nv_tgamma(__a); }
-__DEVICE__ float tgammaf(float __a) { return __nv_tgammaf(__a); }
-__DEVICE__ double trunc(double __a) { return __nv_trunc(__a); }
-__DEVICE__ float truncf(float __a) { return __nv_truncf(__a); }
-__DEVICE__ unsigned long long ullmax(unsigned long long __a,
-                                     unsigned long long __b) {
-  return __nv_ullmax(__a, __b);
-}
-__DEVICE__ unsigned long long ullmin(unsigned long long __a,
-                                     unsigned long long __b) {
-  return __nv_ullmin(__a, __b);
-}
-__DEVICE__ unsigned int umax(unsigned int __a, unsigned int __b) {
-  return __nv_umax(__a, __b);
-}
-__DEVICE__ unsigned int umin(unsigned int __a, unsigned int __b) {
-  return __nv_umin(__a, __b);
-}
-__DEVICE__ double y0(double __a) { return __nv_y0(__a); }
-__DEVICE__ float y0f(float __a) { return __nv_y0f(__a); }
-__DEVICE__ double y1(double __a) { return __nv_y1(__a); }
-__DEVICE__ float y1f(float __a) { return __nv_y1f(__a); }
-__DEVICE__ double yn(int __a, double __b) { return __nv_yn(__a, __b); }
-__DEVICE__ float ynf(int __a, float __b) { return __nv_ynf(__a, __b); }
-
-#pragma pop_macro("__DEVICE__")
-#pragma pop_macro("__DEVICE_VOID__")
-#pragma pop_macro("__FAST_OR_SLOW")
-
-#endif // __CLANG_CUDA_DEVICE_FUNCTIONS_H__
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_hip_libdevice_declares.h b/linux-x86/lib64/clang/11.0.5/include/__clang_hip_libdevice_declares.h
deleted file mode 100644
index e1cd49a..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/__clang_hip_libdevice_declares.h
+++ /dev/null
@@ -1,326 +0,0 @@
-/*===---- __clang_hip_libdevice_declares.h - HIP device library decls -------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __CLANG_HIP_LIBDEVICE_DECLARES_H__
-#define __CLANG_HIP_LIBDEVICE_DECLARES_H__
-
-extern "C" {
-
-// BEGIN FLOAT
-__device__ __attribute__((const)) float __ocml_acos_f32(float);
-__device__ __attribute__((pure)) float __ocml_acosh_f32(float);
-__device__ __attribute__((const)) float __ocml_asin_f32(float);
-__device__ __attribute__((pure)) float __ocml_asinh_f32(float);
-__device__ __attribute__((const)) float __ocml_atan2_f32(float, float);
-__device__ __attribute__((const)) float __ocml_atan_f32(float);
-__device__ __attribute__((pure)) float __ocml_atanh_f32(float);
-__device__ __attribute__((pure)) float __ocml_cbrt_f32(float);
-__device__ __attribute__((const)) float __ocml_ceil_f32(float);
-__device__ __attribute__((const)) float __ocml_copysign_f32(float, float);
-__device__ float __ocml_cos_f32(float);
-__device__ float __ocml_native_cos_f32(float);
-__device__ __attribute__((pure)) float __ocml_cosh_f32(float);
-__device__ float __ocml_cospi_f32(float);
-__device__ float __ocml_i0_f32(float);
-__device__ float __ocml_i1_f32(float);
-__device__ __attribute__((pure)) float __ocml_erfc_f32(float);
-__device__ __attribute__((pure)) float __ocml_erfcinv_f32(float);
-__device__ __attribute__((pure)) float __ocml_erfcx_f32(float);
-__device__ __attribute__((pure)) float __ocml_erf_f32(float);
-__device__ __attribute__((pure)) float __ocml_erfinv_f32(float);
-__device__ __attribute__((pure)) float __ocml_exp10_f32(float);
-__device__ __attribute__((pure)) float __ocml_native_exp10_f32(float);
-__device__ __attribute__((pure)) float __ocml_exp2_f32(float);
-__device__ __attribute__((pure)) float __ocml_exp_f32(float);
-__device__ __attribute__((pure)) float __ocml_native_exp_f32(float);
-__device__ __attribute__((pure)) float __ocml_expm1_f32(float);
-__device__ __attribute__((const)) float __ocml_fabs_f32(float);
-__device__ __attribute__((const)) float __ocml_fdim_f32(float, float);
-__device__ __attribute__((const)) float __ocml_floor_f32(float);
-__device__ __attribute__((const)) float __ocml_fma_f32(float, float, float);
-__device__ __attribute__((const)) float __ocml_fmax_f32(float, float);
-__device__ __attribute__((const)) float __ocml_fmin_f32(float, float);
-__device__ __attribute__((const)) float __ocml_fmod_f32(float, float);
-__device__ float __ocml_frexp_f32(float,
-                                  __attribute__((address_space(5))) int *);
-__device__ __attribute__((const)) float __ocml_hypot_f32(float, float);
-__device__ __attribute__((const)) int __ocml_ilogb_f32(float);
-__device__ __attribute__((const)) int __ocml_isfinite_f32(float);
-__device__ __attribute__((const)) int __ocml_isinf_f32(float);
-__device__ __attribute__((const)) int __ocml_isnan_f32(float);
-__device__ float __ocml_j0_f32(float);
-__device__ float __ocml_j1_f32(float);
-__device__ __attribute__((const)) float __ocml_ldexp_f32(float, int);
-__device__ float __ocml_lgamma_f32(float);
-__device__ __attribute__((pure)) float __ocml_log10_f32(float);
-__device__ __attribute__((pure)) float __ocml_native_log10_f32(float);
-__device__ __attribute__((pure)) float __ocml_log1p_f32(float);
-__device__ __attribute__((pure)) float __ocml_log2_f32(float);
-__device__ __attribute__((pure)) float __ocml_native_log2_f32(float);
-__device__ __attribute__((const)) float __ocml_logb_f32(float);
-__device__ __attribute__((pure)) float __ocml_log_f32(float);
-__device__ __attribute__((pure)) float __ocml_native_log_f32(float);
-__device__ float __ocml_modf_f32(float,
-                                 __attribute__((address_space(5))) float *);
-__device__ __attribute__((const)) float __ocml_nearbyint_f32(float);
-__device__ __attribute__((const)) float __ocml_nextafter_f32(float, float);
-__device__ __attribute__((const)) float __ocml_len3_f32(float, float, float);
-__device__ __attribute__((const)) float __ocml_len4_f32(float, float, float,
-                                                        float);
-__device__ __attribute__((pure)) float __ocml_ncdf_f32(float);
-__device__ __attribute__((pure)) float __ocml_ncdfinv_f32(float);
-__device__ __attribute__((pure)) float __ocml_pow_f32(float, float);
-__device__ __attribute__((pure)) float __ocml_rcbrt_f32(float);
-__device__ __attribute__((const)) float __ocml_remainder_f32(float, float);
-__device__ float __ocml_remquo_f32(float, float,
-                                   __attribute__((address_space(5))) int *);
-__device__ __attribute__((const)) float __ocml_rhypot_f32(float, float);
-__device__ __attribute__((const)) float __ocml_rint_f32(float);
-__device__ __attribute__((const)) float __ocml_rlen3_f32(float, float, float);
-__device__ __attribute__((const)) float __ocml_rlen4_f32(float, float, float,
-                                                         float);
-__device__ __attribute__((const)) float __ocml_round_f32(float);
-__device__ __attribute__((pure)) float __ocml_rsqrt_f32(float);
-__device__ __attribute__((const)) float __ocml_scalb_f32(float, float);
-__device__ __attribute__((const)) float __ocml_scalbn_f32(float, int);
-__device__ __attribute__((const)) int __ocml_signbit_f32(float);
-__device__ float __ocml_sincos_f32(float,
-                                   __attribute__((address_space(5))) float *);
-__device__ float __ocml_sincospi_f32(float,
-                                     __attribute__((address_space(5))) float *);
-__device__ float __ocml_sin_f32(float);
-__device__ float __ocml_native_sin_f32(float);
-__device__ __attribute__((pure)) float __ocml_sinh_f32(float);
-__device__ float __ocml_sinpi_f32(float);
-__device__ __attribute__((const)) float __ocml_sqrt_f32(float);
-__device__ __attribute__((const)) float __ocml_native_sqrt_f32(float);
-__device__ float __ocml_tan_f32(float);
-__device__ __attribute__((pure)) float __ocml_tanh_f32(float);
-__device__ float __ocml_tgamma_f32(float);
-__device__ __attribute__((const)) float __ocml_trunc_f32(float);
-__device__ float __ocml_y0_f32(float);
-__device__ float __ocml_y1_f32(float);
-
-// BEGIN INTRINSICS
-__device__ __attribute__((const)) float __ocml_add_rte_f32(float, float);
-__device__ __attribute__((const)) float __ocml_add_rtn_f32(float, float);
-__device__ __attribute__((const)) float __ocml_add_rtp_f32(float, float);
-__device__ __attribute__((const)) float __ocml_add_rtz_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sub_rte_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sub_rtn_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sub_rtp_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sub_rtz_f32(float, float);
-__device__ __attribute__((const)) float __ocml_mul_rte_f32(float, float);
-__device__ __attribute__((const)) float __ocml_mul_rtn_f32(float, float);
-__device__ __attribute__((const)) float __ocml_mul_rtp_f32(float, float);
-__device__ __attribute__((const)) float __ocml_mul_rtz_f32(float, float);
-__device__ __attribute__((const)) float __ocml_div_rte_f32(float, float);
-__device__ __attribute__((const)) float __ocml_div_rtn_f32(float, float);
-__device__ __attribute__((const)) float __ocml_div_rtp_f32(float, float);
-__device__ __attribute__((const)) float __ocml_div_rtz_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sqrt_rte_f32(float);
-__device__ __attribute__((const)) float __ocml_sqrt_rtn_f32(float);
-__device__ __attribute__((const)) float __ocml_sqrt_rtp_f32(float);
-__device__ __attribute__((const)) float __ocml_sqrt_rtz_f32(float);
-__device__ __attribute__((const)) float __ocml_fma_rte_f32(float, float, float);
-__device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float);
-__device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float);
-__device__ __attribute__((const)) float __ocml_fma_rtz_f32(float, float, float);
-
-__device__ __attribute__((const)) float
-__llvm_amdgcn_cos_f32(float) __asm("llvm.amdgcn.cos.f32");
-__device__ __attribute__((const)) float
-__llvm_amdgcn_rcp_f32(float) __asm("llvm.amdgcn.rcp.f32");
-__device__ __attribute__((const)) float
-__llvm_amdgcn_rsq_f32(float) __asm("llvm.amdgcn.rsq.f32");
-__device__ __attribute__((const)) float
-__llvm_amdgcn_sin_f32(float) __asm("llvm.amdgcn.sin.f32");
-// END INTRINSICS
-// END FLOAT
-
-// BEGIN DOUBLE
-__device__ __attribute__((const)) double __ocml_acos_f64(double);
-__device__ __attribute__((pure)) double __ocml_acosh_f64(double);
-__device__ __attribute__((const)) double __ocml_asin_f64(double);
-__device__ __attribute__((pure)) double __ocml_asinh_f64(double);
-__device__ __attribute__((const)) double __ocml_atan2_f64(double, double);
-__device__ __attribute__((const)) double __ocml_atan_f64(double);
-__device__ __attribute__((pure)) double __ocml_atanh_f64(double);
-__device__ __attribute__((pure)) double __ocml_cbrt_f64(double);
-__device__ __attribute__((const)) double __ocml_ceil_f64(double);
-__device__ __attribute__((const)) double __ocml_copysign_f64(double, double);
-__device__ double __ocml_cos_f64(double);
-__device__ __attribute__((pure)) double __ocml_cosh_f64(double);
-__device__ double __ocml_cospi_f64(double);
-__device__ double __ocml_i0_f64(double);
-__device__ double __ocml_i1_f64(double);
-__device__ __attribute__((pure)) double __ocml_erfc_f64(double);
-__device__ __attribute__((pure)) double __ocml_erfcinv_f64(double);
-__device__ __attribute__((pure)) double __ocml_erfcx_f64(double);
-__device__ __attribute__((pure)) double __ocml_erf_f64(double);
-__device__ __attribute__((pure)) double __ocml_erfinv_f64(double);
-__device__ __attribute__((pure)) double __ocml_exp10_f64(double);
-__device__ __attribute__((pure)) double __ocml_exp2_f64(double);
-__device__ __attribute__((pure)) double __ocml_exp_f64(double);
-__device__ __attribute__((pure)) double __ocml_expm1_f64(double);
-__device__ __attribute__((const)) double __ocml_fabs_f64(double);
-__device__ __attribute__((const)) double __ocml_fdim_f64(double, double);
-__device__ __attribute__((const)) double __ocml_floor_f64(double);
-__device__ __attribute__((const)) double __ocml_fma_f64(double, double, double);
-__device__ __attribute__((const)) double __ocml_fmax_f64(double, double);
-__device__ __attribute__((const)) double __ocml_fmin_f64(double, double);
-__device__ __attribute__((const)) double __ocml_fmod_f64(double, double);
-__device__ double __ocml_frexp_f64(double,
-                                   __attribute__((address_space(5))) int *);
-__device__ __attribute__((const)) double __ocml_hypot_f64(double, double);
-__device__ __attribute__((const)) int __ocml_ilogb_f64(double);
-__device__ __attribute__((const)) int __ocml_isfinite_f64(double);
-__device__ __attribute__((const)) int __ocml_isinf_f64(double);
-__device__ __attribute__((const)) int __ocml_isnan_f64(double);
-__device__ double __ocml_j0_f64(double);
-__device__ double __ocml_j1_f64(double);
-__device__ __attribute__((const)) double __ocml_ldexp_f64(double, int);
-__device__ double __ocml_lgamma_f64(double);
-__device__ __attribute__((pure)) double __ocml_log10_f64(double);
-__device__ __attribute__((pure)) double __ocml_log1p_f64(double);
-__device__ __attribute__((pure)) double __ocml_log2_f64(double);
-__device__ __attribute__((const)) double __ocml_logb_f64(double);
-__device__ __attribute__((pure)) double __ocml_log_f64(double);
-__device__ double __ocml_modf_f64(double,
-                                  __attribute__((address_space(5))) double *);
-__device__ __attribute__((const)) double __ocml_nearbyint_f64(double);
-__device__ __attribute__((const)) double __ocml_nextafter_f64(double, double);
-__device__ __attribute__((const)) double __ocml_len3_f64(double, double,
-                                                         double);
-__device__ __attribute__((const)) double __ocml_len4_f64(double, double, double,
-                                                         double);
-__device__ __attribute__((pure)) double __ocml_ncdf_f64(double);
-__device__ __attribute__((pure)) double __ocml_ncdfinv_f64(double);
-__device__ __attribute__((pure)) double __ocml_pow_f64(double, double);
-__device__ __attribute__((pure)) double __ocml_rcbrt_f64(double);
-__device__ __attribute__((const)) double __ocml_remainder_f64(double, double);
-__device__ double __ocml_remquo_f64(double, double,
-                                    __attribute__((address_space(5))) int *);
-__device__ __attribute__((const)) double __ocml_rhypot_f64(double, double);
-__device__ __attribute__((const)) double __ocml_rint_f64(double);
-__device__ __attribute__((const)) double __ocml_rlen3_f64(double, double,
-                                                          double);
-__device__ __attribute__((const)) double __ocml_rlen4_f64(double, double,
-                                                          double, double);
-__device__ __attribute__((const)) double __ocml_round_f64(double);
-__device__ __attribute__((pure)) double __ocml_rsqrt_f64(double);
-__device__ __attribute__((const)) double __ocml_scalb_f64(double, double);
-__device__ __attribute__((const)) double __ocml_scalbn_f64(double, int);
-__device__ __attribute__((const)) int __ocml_signbit_f64(double);
-__device__ double __ocml_sincos_f64(double,
-                                    __attribute__((address_space(5))) double *);
-__device__ double
-__ocml_sincospi_f64(double, __attribute__((address_space(5))) double *);
-__device__ double __ocml_sin_f64(double);
-__device__ __attribute__((pure)) double __ocml_sinh_f64(double);
-__device__ double __ocml_sinpi_f64(double);
-__device__ __attribute__((const)) double __ocml_sqrt_f64(double);
-__device__ double __ocml_tan_f64(double);
-__device__ __attribute__((pure)) double __ocml_tanh_f64(double);
-__device__ double __ocml_tgamma_f64(double);
-__device__ __attribute__((const)) double __ocml_trunc_f64(double);
-__device__ double __ocml_y0_f64(double);
-__device__ double __ocml_y1_f64(double);
-
-// BEGIN INTRINSICS
-__device__ __attribute__((const)) double __ocml_add_rte_f64(double, double);
-__device__ __attribute__((const)) double __ocml_add_rtn_f64(double, double);
-__device__ __attribute__((const)) double __ocml_add_rtp_f64(double, double);
-__device__ __attribute__((const)) double __ocml_add_rtz_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sub_rte_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sub_rtn_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sub_rtp_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sub_rtz_f64(double, double);
-__device__ __attribute__((const)) double __ocml_mul_rte_f64(double, double);
-__device__ __attribute__((const)) double __ocml_mul_rtn_f64(double, double);
-__device__ __attribute__((const)) double __ocml_mul_rtp_f64(double, double);
-__device__ __attribute__((const)) double __ocml_mul_rtz_f64(double, double);
-__device__ __attribute__((const)) double __ocml_div_rte_f64(double, double);
-__device__ __attribute__((const)) double __ocml_div_rtn_f64(double, double);
-__device__ __attribute__((const)) double __ocml_div_rtp_f64(double, double);
-__device__ __attribute__((const)) double __ocml_div_rtz_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sqrt_rte_f64(double);
-__device__ __attribute__((const)) double __ocml_sqrt_rtn_f64(double);
-__device__ __attribute__((const)) double __ocml_sqrt_rtp_f64(double);
-__device__ __attribute__((const)) double __ocml_sqrt_rtz_f64(double);
-__device__ __attribute__((const)) double __ocml_fma_rte_f64(double, double,
-                                                            double);
-__device__ __attribute__((const)) double __ocml_fma_rtn_f64(double, double,
-                                                            double);
-__device__ __attribute__((const)) double __ocml_fma_rtp_f64(double, double,
-                                                            double);
-__device__ __attribute__((const)) double __ocml_fma_rtz_f64(double, double,
-                                                            double);
-
-__device__ __attribute__((const)) double
-__llvm_amdgcn_rcp_f64(double) __asm("llvm.amdgcn.rcp.f64");
-__device__ __attribute__((const)) double
-__llvm_amdgcn_rsq_f64(double) __asm("llvm.amdgcn.rsq.f64");
-
-__device__ __attribute__((const)) _Float16 __ocml_ceil_f16(_Float16);
-__device__ _Float16 __ocml_cos_f16(_Float16);
-__device__ __attribute__((pure)) _Float16 __ocml_exp_f16(_Float16);
-__device__ __attribute__((pure)) _Float16 __ocml_exp10_f16(_Float16);
-__device__ __attribute__((pure)) _Float16 __ocml_exp2_f16(_Float16);
-__device__ __attribute__((const)) _Float16 __ocml_floor_f16(_Float16);
-__device__ __attribute__((const)) _Float16 __ocml_fma_f16(_Float16, _Float16,
-                                                          _Float16);
-__device__ __attribute__((const)) _Float16 __ocml_fabs_f16(_Float16);
-__device__ __attribute__((const)) int __ocml_isinf_f16(_Float16);
-__device__ __attribute__((const)) int __ocml_isnan_f16(_Float16);
-__device__ __attribute__((pure)) _Float16 __ocml_log_f16(_Float16);
-__device__ __attribute__((pure)) _Float16 __ocml_log10_f16(_Float16);
-__device__ __attribute__((pure)) _Float16 __ocml_log2_f16(_Float16);
-__device__ __attribute__((const)) _Float16 __llvm_amdgcn_rcp_f16(_Float16);
-__device__ __attribute__((const)) _Float16 __ocml_rint_f16(_Float16);
-__device__ __attribute__((const)) _Float16 __ocml_rsqrt_f16(_Float16);
-__device__ _Float16 __ocml_sin_f16(_Float16);
-__device__ __attribute__((const)) _Float16 __ocml_sqrt_f16(_Float16);
-__device__ __attribute__((const)) _Float16 __ocml_trunc_f16(_Float16);
-
-typedef _Float16 __2f16 __attribute__((ext_vector_type(2)));
-typedef short __2i16 __attribute__((ext_vector_type(2)));
-
-__device__ __attribute__((const)) float __ockl_fdot2(__2f16 a, __2f16 b,
-                                                     float c, bool s);
-__device__ __attribute__((const)) __2f16 __ocml_ceil_2f16(__2f16);
-__device__ __attribute__((const)) __2f16 __ocml_fabs_2f16(__2f16);
-__device__ __2f16 __ocml_cos_2f16(__2f16);
-__device__ __attribute__((pure)) __2f16 __ocml_exp_2f16(__2f16);
-__device__ __attribute__((pure)) __2f16 __ocml_exp10_2f16(__2f16);
-__device__ __attribute__((pure)) __2f16 __ocml_exp2_2f16(__2f16);
-__device__ __attribute__((const)) __2f16 __ocml_floor_2f16(__2f16);
-__device__ __attribute__((const))
-__2f16 __ocml_fma_2f16(__2f16, __2f16, __2f16);
-__device__ __attribute__((const)) __2i16 __ocml_isinf_2f16(__2f16);
-__device__ __attribute__((const)) __2i16 __ocml_isnan_2f16(__2f16);
-__device__ __attribute__((pure)) __2f16 __ocml_log_2f16(__2f16);
-__device__ __attribute__((pure)) __2f16 __ocml_log10_2f16(__2f16);
-__device__ __attribute__((pure)) __2f16 __ocml_log2_2f16(__2f16);
-__device__ inline __2f16
-__llvm_amdgcn_rcp_2f16(__2f16 __x) // Not currently exposed by ROCDL.
-{
-  return __2f16{__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y)};
-}
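-
-// Note: __llvm_amdgcn_rcp_2f16 above expands the packed-half reciprocal one
-// lane at a time (.x and .y are the two ext_vector_type lanes) because, per
-// the comment, ROCDL does not currently expose a 2 x f16 rcp intrinsic.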
-__device__ __attribute__((const)) __2f16 __ocml_rint_2f16(__2f16);
-__device__ __attribute__((const)) __2f16 __ocml_rsqrt_2f16(__2f16);
-__device__ __2f16 __ocml_sin_2f16(__2f16);
-__device__ __attribute__((const)) __2f16 __ocml_sqrt_2f16(__2f16);
-__device__ __attribute__((const)) __2f16 __ocml_trunc_2f16(__2f16);
-
-} // extern "C"
-
-#endif // __CLANG_HIP_LIBDEVICE_DECLARES_H__
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_hip_math.h b/linux-x86/lib64/clang/11.0.5/include/__clang_hip_math.h
deleted file mode 100644
index cf7014b..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/__clang_hip_math.h
+++ /dev/null
@@ -1,1185 +0,0 @@
-/*===---- __clang_hip_math.h - HIP math decls -------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __CLANG_HIP_MATH_H__
-#define __CLANG_HIP_MATH_H__
-
-#include <algorithm>
-#include <limits.h>
-#include <limits>
-#include <stdint.h>
-
-#pragma push_macro("__DEVICE__")
-#pragma push_macro("__RETURN_TYPE")
-
-// to be consistent with __clang_cuda_math_forward_declares
-#define __DEVICE__ static __device__
-#define __RETURN_TYPE bool
-
-__DEVICE__
-inline uint64_t __make_mantissa_base8(const char *__tagp) {
-  uint64_t __r = 0;
-  while (*__tagp != '\0') { // stop at the terminator, not on the pointer
-    char __tmp = *__tagp;
-
-    if (__tmp >= '0' && __tmp <= '7')
-      __r = (__r * 8u) + __tmp - '0';
-    else
-      return 0;
-
-    ++__tagp;
-  }
-
-  return __r;
-}
-
-__DEVICE__
-inline uint64_t __make_mantissa_base10(const char *__tagp) {
-  uint64_t __r = 0;
-  while (*__tagp != '\0') {
-    char __tmp = *__tagp;
-
-    if (__tmp >= '0' && __tmp <= '9')
-      __r = (__r * 10u) + __tmp - '0';
-    else
-      return 0;
-
-    ++__tagp;
-  }
-
-  return __r;
-}
-
-__DEVICE__
-inline uint64_t __make_mantissa_base16(const char *__tagp) {
-  uint64_t __r = 0;
-  while (*__tagp != '\0') {
-    char __tmp = *__tagp;
-
-    if (__tmp >= '0' && __tmp <= '9')
-      __r = (__r * 16u) + __tmp - '0';
-    else if (__tmp >= 'a' && __tmp <= 'f')
-      __r = (__r * 16u) + __tmp - 'a' + 10;
-    else if (__tmp >= 'A' && __tmp <= 'F')
-      __r = (__r * 16u) + __tmp - 'A' + 10;
-    else
-      return 0;
-
-    ++__tagp;
-  }
-
-  return __r;
-}
-
-__DEVICE__
-inline uint64_t __make_mantissa(const char *__tagp) {
-  if (!__tagp)
-    return 0u;
-
-  if (*__tagp == '0') {
-    ++__tagp;
-
-    if (*__tagp == 'x' || *__tagp == 'X')
-      return __make_mantissa_base16(__tagp + 1); // skip the 'x'/'X'
-    else
-      return __make_mantissa_base8(__tagp);
-  }
-
-  return __make_mantissa_base10(__tagp);
-}
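-
-// Worked example (illustrative, not part of the original header): with the
-// fixes above, a tag of "0x1f" takes the hex path and yields 0x1f == 31,
-// "017" takes the octal path and yields 15, "17" parses as decimal 17, and
-// any malformed tag collapses to 0.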
-
-// BEGIN FLOAT
-__DEVICE__
-inline float abs(float __x) { return __ocml_fabs_f32(__x); }
-__DEVICE__
-inline float acosf(float __x) { return __ocml_acos_f32(__x); }
-__DEVICE__
-inline float acoshf(float __x) { return __ocml_acosh_f32(__x); }
-__DEVICE__
-inline float asinf(float __x) { return __ocml_asin_f32(__x); }
-__DEVICE__
-inline float asinhf(float __x) { return __ocml_asinh_f32(__x); }
-__DEVICE__
-inline float atan2f(float __x, float __y) { return __ocml_atan2_f32(__x, __y); }
-__DEVICE__
-inline float atanf(float __x) { return __ocml_atan_f32(__x); }
-__DEVICE__
-inline float atanhf(float __x) { return __ocml_atanh_f32(__x); }
-__DEVICE__
-inline float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
-__DEVICE__
-inline float ceilf(float __x) { return __ocml_ceil_f32(__x); }
-__DEVICE__
-inline float copysignf(float __x, float __y) {
-  return __ocml_copysign_f32(__x, __y);
-}
-__DEVICE__
-inline float cosf(float __x) { return __ocml_cos_f32(__x); }
-__DEVICE__
-inline float coshf(float __x) { return __ocml_cosh_f32(__x); }
-__DEVICE__
-inline float cospif(float __x) { return __ocml_cospi_f32(__x); }
-__DEVICE__
-inline float cyl_bessel_i0f(float __x) { return __ocml_i0_f32(__x); }
-__DEVICE__
-inline float cyl_bessel_i1f(float __x) { return __ocml_i1_f32(__x); }
-__DEVICE__
-inline float erfcf(float __x) { return __ocml_erfc_f32(__x); }
-__DEVICE__
-inline float erfcinvf(float __x) { return __ocml_erfcinv_f32(__x); }
-__DEVICE__
-inline float erfcxf(float __x) { return __ocml_erfcx_f32(__x); }
-__DEVICE__
-inline float erff(float __x) { return __ocml_erf_f32(__x); }
-__DEVICE__
-inline float erfinvf(float __x) { return __ocml_erfinv_f32(__x); }
-__DEVICE__
-inline float exp10f(float __x) { return __ocml_exp10_f32(__x); }
-__DEVICE__
-inline float exp2f(float __x) { return __ocml_exp2_f32(__x); }
-__DEVICE__
-inline float expf(float __x) { return __ocml_exp_f32(__x); }
-__DEVICE__
-inline float expm1f(float __x) { return __ocml_expm1_f32(__x); }
-__DEVICE__
-inline float fabsf(float __x) { return __ocml_fabs_f32(__x); }
-__DEVICE__
-inline float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); }
-__DEVICE__
-inline float fdividef(float __x, float __y) { return __x / __y; }
-__DEVICE__
-inline float floorf(float __x) { return __ocml_floor_f32(__x); }
-__DEVICE__
-inline float fmaf(float __x, float __y, float __z) {
-  return __ocml_fma_f32(__x, __y, __z);
-}
-__DEVICE__
-inline float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
-__DEVICE__
-inline float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
-__DEVICE__
-inline float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
-__DEVICE__
-inline float frexpf(float __x, int *__nptr) {
-  int __tmp;
-  float __r =
-      __ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp);
-  *__nptr = __tmp;
-
-  return __r;
-}
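-
-// The staging through __tmp above is needed because the OCML entry point
-// takes a pointer in LLVM address space 5 (private/stack memory on amdgcn),
-// so a generic pointer like __nptr cannot be passed directly; the same
-// pattern recurs in modff, remquof, sincosf and friends below.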
-__DEVICE__
-inline float hypotf(float __x, float __y) { return __ocml_hypot_f32(__x, __y); }
-__DEVICE__
-inline int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
-__DEVICE__
-inline __RETURN_TYPE isfinite(float __x) { return __ocml_isfinite_f32(__x); }
-__DEVICE__
-inline __RETURN_TYPE isinf(float __x) { return __ocml_isinf_f32(__x); }
-__DEVICE__
-inline __RETURN_TYPE isnan(float __x) { return __ocml_isnan_f32(__x); }
-__DEVICE__
-inline float j0f(float __x) { return __ocml_j0_f32(__x); }
-__DEVICE__
-inline float j1f(float __x) { return __ocml_j1_f32(__x); }
-__DEVICE__
-inline float jnf(int __n,
-                 float __x) { // TODO: we could use Ahmes multiplication
-                              // and the Miller & Brown algorithm
-  //       for linear recurrences to get O(log n) steps, but it's unclear if
-  //       it'd be beneficial in this case.
-  if (__n == 0)
-    return j0f(__x);
-  if (__n == 1)
-    return j1f(__x);
-
-  float __x0 = j0f(__x);
-  float __x1 = j1f(__x);
-  for (int __i = 1; __i < __n; ++__i) {
-    float __x2 = (2 * __i) / __x * __x1 - __x0;
-    __x0 = __x1;
-    __x1 = __x2;
-  }
-
-  return __x1;
-}
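-
-// The loop above is the forward Bessel recurrence
-//   J(n+1, x) = (2*n / x) * J(n, x) - J(n-1, x),
-// seeded with j0f/j1f. Worth noting: forward recursion loses accuracy once
-// __n substantially exceeds __x, which is one motivation for the fancier
-// algorithms mentioned in the TODO.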
-__DEVICE__
-inline float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
-__DEVICE__
-inline float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
-__DEVICE__
-inline long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
-__DEVICE__
-inline long long int llroundf(float __x) { return __ocml_round_f32(__x); }
-__DEVICE__
-inline float log10f(float __x) { return __ocml_log10_f32(__x); }
-__DEVICE__
-inline float log1pf(float __x) { return __ocml_log1p_f32(__x); }
-__DEVICE__
-inline float log2f(float __x) { return __ocml_log2_f32(__x); }
-__DEVICE__
-inline float logbf(float __x) { return __ocml_logb_f32(__x); }
-__DEVICE__
-inline float logf(float __x) { return __ocml_log_f32(__x); }
-__DEVICE__
-inline long int lrintf(float __x) { return __ocml_rint_f32(__x); }
-__DEVICE__
-inline long int lroundf(float __x) { return __ocml_round_f32(__x); }
-__DEVICE__
-inline float modff(float __x, float *__iptr) {
-  float __tmp;
-  float __r =
-      __ocml_modf_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
-  *__iptr = __tmp;
-
-  return __r;
-}
-__DEVICE__
-inline float nanf(const char *__tagp) {
-  union {
-    float val;
-    struct ieee_float {
-      uint32_t mantissa : 22;
-      uint32_t quiet : 1;
-      uint32_t exponent : 8;
-      uint32_t sign : 1;
-    } bits;
-
-    static_assert(sizeof(float) == sizeof(ieee_float), "");
-  } __tmp;
-
-  __tmp.bits.sign = 0u;
-  __tmp.bits.exponent = ~0u;
-  __tmp.bits.quiet = 1u;
-  __tmp.bits.mantissa = __make_mantissa(__tagp);
-
-  return __tmp.val;
-}
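-
-// Worked example (illustrative): nanf("0x7") parses the tag as hex, giving
-// a mantissa payload of 7; with sign 0, exponent 0xFF and the quiet bit
-// set, the stored pattern is 0x7FC00007, a quiet NaN carrying payload 7.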
-__DEVICE__
-inline float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
-__DEVICE__
-inline float nextafterf(float __x, float __y) {
-  return __ocml_nextafter_f32(__x, __y);
-}
-__DEVICE__
-inline float norm3df(float __x, float __y, float __z) {
-  return __ocml_len3_f32(__x, __y, __z);
-}
-__DEVICE__
-inline float norm4df(float __x, float __y, float __z, float __w) {
-  return __ocml_len4_f32(__x, __y, __z, __w);
-}
-__DEVICE__
-inline float normcdff(float __x) { return __ocml_ncdf_f32(__x); }
-__DEVICE__
-inline float normcdfinvf(float __x) { return __ocml_ncdfinv_f32(__x); }
-__DEVICE__
-inline float
-normf(int __dim,
-      const float *__a) { // TODO: placeholder until OCML adds support.
-  float __r = 0;
-  while (__dim--) {
-    __r += __a[0] * __a[0];
-    ++__a;
-  }
-
-  return __ocml_sqrt_f32(__r);
-}
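-
-// Example (illustrative): for float __v[3] = {3.f, 4.f, 12.f},
-// normf(3, __v) accumulates 9 + 16 + 144 = 169 and returns
-// sqrtf(169) == 13.f, i.e. the Euclidean length of the vector.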
-__DEVICE__
-inline float powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
-__DEVICE__
-inline float rcbrtf(float __x) { return __ocml_rcbrt_f32(__x); }
-__DEVICE__
-inline float remainderf(float __x, float __y) {
-  return __ocml_remainder_f32(__x, __y);
-}
-__DEVICE__
-inline float remquof(float __x, float __y, int *__quo) {
-  int __tmp;
-  float __r = __ocml_remquo_f32(
-      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
-  *__quo = __tmp;
-
-  return __r;
-}
-__DEVICE__
-inline float rhypotf(float __x, float __y) {
-  return __ocml_rhypot_f32(__x, __y);
-}
-__DEVICE__
-inline float rintf(float __x) { return __ocml_rint_f32(__x); }
-__DEVICE__
-inline float rnorm3df(float __x, float __y, float __z) {
-  return __ocml_rlen3_f32(__x, __y, __z);
-}
-
-__DEVICE__
-inline float rnorm4df(float __x, float __y, float __z, float __w) {
-  return __ocml_rlen4_f32(__x, __y, __z, __w);
-}
-__DEVICE__
-inline float
-rnormf(int __dim,
-       const float *__a) { // TODO: placeholder until OCML adds support.
-  float __r = 0;
-  while (__dim--) {
-    __r += __a[0] * __a[0];
-    ++__a;
-  }
-
-  return __ocml_rsqrt_f32(__r);
-}
-__DEVICE__
-inline float roundf(float __x) { return __ocml_round_f32(__x); }
-__DEVICE__
-inline float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
-__DEVICE__
-inline float scalblnf(float __x, long int __n) {
-  return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n)
-                         : __ocml_scalb_f32(__x, __n);
-}
-__DEVICE__
-inline float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
-__DEVICE__
-inline __RETURN_TYPE signbit(float __x) { return __ocml_signbit_f32(__x); }
-__DEVICE__
-inline void sincosf(float __x, float *__sinptr, float *__cosptr) {
-  float __tmp;
-
-  *__sinptr =
-      __ocml_sincos_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
-  *__cosptr = __tmp;
-}
-__DEVICE__
-inline void sincospif(float __x, float *__sinptr, float *__cosptr) {
-  float __tmp;
-
-  *__sinptr = __ocml_sincospi_f32(
-      __x, (__attribute__((address_space(5))) float *)&__tmp);
-  *__cosptr = __tmp;
-}
-__DEVICE__
-inline float sinf(float __x) { return __ocml_sin_f32(__x); }
-__DEVICE__
-inline float sinhf(float __x) { return __ocml_sinh_f32(__x); }
-__DEVICE__
-inline float sinpif(float __x) { return __ocml_sinpi_f32(__x); }
-__DEVICE__
-inline float sqrtf(float __x) { return __ocml_sqrt_f32(__x); }
-__DEVICE__
-inline float tanf(float __x) { return __ocml_tan_f32(__x); }
-__DEVICE__
-inline float tanhf(float __x) { return __ocml_tanh_f32(__x); }
-__DEVICE__
-inline float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
-__DEVICE__
-inline float truncf(float __x) { return __ocml_trunc_f32(__x); }
-__DEVICE__
-inline float y0f(float __x) { return __ocml_y0_f32(__x); }
-__DEVICE__
-inline float y1f(float __x) { return __ocml_y1_f32(__x); }
-__DEVICE__
-inline float ynf(int __n,
-                 float __x) { // TODO: we could use Ahmes multiplication
-                              // and the Miller & Brown algorithm
-  //       for linear recurrences to get O(log n) steps, but it's unclear if
-  //       it'd be beneficial in this case. Placeholder until OCML adds
-  //       support.
-  if (__n == 0)
-    return y0f(__x);
-  if (__n == 1)
-    return y1f(__x);
-
-  float __x0 = y0f(__x);
-  float __x1 = y1f(__x);
-  for (int __i = 1; __i < __n; ++__i) {
-    float __x2 = (2 * __i) / __x * __x1 - __x0;
-    __x0 = __x1;
-    __x1 = __x2;
-  }
-
-  return __x1;
-}
-
-// BEGIN INTRINSICS
-__DEVICE__
-inline float __cosf(float __x) { return __ocml_native_cos_f32(__x); }
-__DEVICE__
-inline float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); }
-__DEVICE__
-inline float __expf(float __x) { return __ocml_native_exp_f32(__x); }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fadd_rd(float __x, float __y) {
-  return __ocml_add_rtn_f32(__x, __y);
-}
-#endif
-__DEVICE__
-inline float __fadd_rn(float __x, float __y) { return __x + __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fadd_ru(float __x, float __y) {
-  return __ocml_add_rtp_f32(__x, __y);
-}
-__DEVICE__
-inline float __fadd_rz(float __x, float __y) {
-  return __ocml_add_rtz_f32(__x, __y);
-}
-__DEVICE__
-inline float __fdiv_rd(float __x, float __y) {
-  return __ocml_div_rtn_f32(__x, __y);
-}
-#endif
-__DEVICE__
-inline float __fdiv_rn(float __x, float __y) { return __x / __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fdiv_ru(float __x, float __y) {
-  return __ocml_div_rtp_f32(__x, __y);
-}
-__DEVICE__
-inline float __fdiv_rz(float __x, float __y) {
-  return __ocml_div_rtz_f32(__x, __y);
-}
-#endif
-__DEVICE__
-inline float __fdividef(float __x, float __y) { return __x / __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fmaf_rd(float __x, float __y, float __z) {
-  return __ocml_fma_rtn_f32(__x, __y, __z);
-}
-#endif
-__DEVICE__
-inline float __fmaf_rn(float __x, float __y, float __z) {
-  return __ocml_fma_f32(__x, __y, __z);
-}
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fmaf_ru(float __x, float __y, float __z) {
-  return __ocml_fma_rtp_f32(__x, __y, __z);
-}
-__DEVICE__
-inline float __fmaf_rz(float __x, float __y, float __z) {
-  return __ocml_fma_rtz_f32(__x, __y, __z);
-}
-__DEVICE__
-inline float __fmul_rd(float __x, float __y) {
-  return __ocml_mul_rtn_f32(__x, __y);
-}
-#endif
-__DEVICE__
-inline float __fmul_rn(float __x, float __y) { return __x * __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fmul_ru(float __x, float __y) {
-  return __ocml_mul_rtp_f32(__x, __y);
-}
-__DEVICE__
-inline float __fmul_rz(float __x, float __y) {
-  return __ocml_mul_rtz_f32(__x, __y);
-}
-__DEVICE__
-inline float __frcp_rd(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
-#endif
-__DEVICE__
-inline float __frcp_rn(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __frcp_ru(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
-__DEVICE__
-inline float __frcp_rz(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
-#endif
-__DEVICE__
-inline float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }
-#endif
-__DEVICE__
-inline float __fsqrt_rn(float __x) { return __ocml_native_sqrt_f32(__x); }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }
-__DEVICE__
-inline float __fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); }
-__DEVICE__
-inline float __fsub_rd(float __x, float __y) {
-  return __ocml_sub_rtn_f32(__x, __y);
-}
-#endif
-__DEVICE__
-inline float __fsub_rn(float __x, float __y) { return __x - __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline float __fsub_ru(float __x, float __y) {
-  return __ocml_sub_rtp_f32(__x, __y);
-}
-__DEVICE__
-inline float __fsub_rz(float __x, float __y) {
-  return __ocml_sub_rtz_f32(__x, __y);
-}
-#endif
-__DEVICE__
-inline float __log10f(float __x) { return __ocml_native_log10_f32(__x); }
-__DEVICE__
-inline float __log2f(float __x) { return __ocml_native_log2_f32(__x); }
-__DEVICE__
-inline float __logf(float __x) { return __ocml_native_log_f32(__x); }
-__DEVICE__
-inline float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
-__DEVICE__
-inline float __saturatef(float __x) {
-  return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x);
-}
-__DEVICE__
-inline void __sincosf(float __x, float *__sinptr, float *__cosptr) {
-  *__sinptr = __ocml_native_sin_f32(__x);
-  *__cosptr = __ocml_native_cos_f32(__x);
-}
-__DEVICE__
-inline float __sinf(float __x) { return __ocml_native_sin_f32(__x); }
-__DEVICE__
-inline float __tanf(float __x) { return __ocml_tan_f32(__x); }
-// END INTRINSICS
-// END FLOAT
-
-// BEGIN DOUBLE
-__DEVICE__
-inline double abs(double __x) { return __ocml_fabs_f64(__x); }
-__DEVICE__
-inline double acos(double __x) { return __ocml_acos_f64(__x); }
-__DEVICE__
-inline double acosh(double __x) { return __ocml_acosh_f64(__x); }
-__DEVICE__
-inline double asin(double __x) { return __ocml_asin_f64(__x); }
-__DEVICE__
-inline double asinh(double __x) { return __ocml_asinh_f64(__x); }
-__DEVICE__
-inline double atan(double __x) { return __ocml_atan_f64(__x); }
-__DEVICE__
-inline double atan2(double __x, double __y) {
-  return __ocml_atan2_f64(__x, __y);
-}
-__DEVICE__
-inline double atanh(double __x) { return __ocml_atanh_f64(__x); }
-__DEVICE__
-inline double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
-__DEVICE__
-inline double ceil(double __x) { return __ocml_ceil_f64(__x); }
-__DEVICE__
-inline double copysign(double __x, double __y) {
-  return __ocml_copysign_f64(__x, __y);
-}
-__DEVICE__
-inline double cos(double __x) { return __ocml_cos_f64(__x); }
-__DEVICE__
-inline double cosh(double __x) { return __ocml_cosh_f64(__x); }
-__DEVICE__
-inline double cospi(double __x) { return __ocml_cospi_f64(__x); }
-__DEVICE__
-inline double cyl_bessel_i0(double __x) { return __ocml_i0_f64(__x); }
-__DEVICE__
-inline double cyl_bessel_i1(double __x) { return __ocml_i1_f64(__x); }
-__DEVICE__
-inline double erf(double __x) { return __ocml_erf_f64(__x); }
-__DEVICE__
-inline double erfc(double __x) { return __ocml_erfc_f64(__x); }
-__DEVICE__
-inline double erfcinv(double __x) { return __ocml_erfcinv_f64(__x); }
-__DEVICE__
-inline double erfcx(double __x) { return __ocml_erfcx_f64(__x); }
-__DEVICE__
-inline double erfinv(double __x) { return __ocml_erfinv_f64(__x); }
-__DEVICE__
-inline double exp(double __x) { return __ocml_exp_f64(__x); }
-__DEVICE__
-inline double exp10(double __x) { return __ocml_exp10_f64(__x); }
-__DEVICE__
-inline double exp2(double __x) { return __ocml_exp2_f64(__x); }
-__DEVICE__
-inline double expm1(double __x) { return __ocml_expm1_f64(__x); }
-__DEVICE__
-inline double fabs(double __x) { return __ocml_fabs_f64(__x); }
-__DEVICE__
-inline double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
-__DEVICE__
-inline double floor(double __x) { return __ocml_floor_f64(__x); }
-__DEVICE__
-inline double fma(double __x, double __y, double __z) {
-  return __ocml_fma_f64(__x, __y, __z);
-}
-__DEVICE__
-inline double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
-__DEVICE__
-inline double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
-__DEVICE__
-inline double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
-__DEVICE__
-inline double frexp(double __x, int *__nptr) {
-  int __tmp;
-  double __r =
-      __ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp);
-  *__nptr = __tmp;
-
-  return __r;
-}
-__DEVICE__
-inline double hypot(double __x, double __y) {
-  return __ocml_hypot_f64(__x, __y);
-}
-__DEVICE__
-inline int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
-__DEVICE__
-inline __RETURN_TYPE isfinite(double __x) { return __ocml_isfinite_f64(__x); }
-__DEVICE__
-inline __RETURN_TYPE isinf(double __x) { return __ocml_isinf_f64(__x); }
-__DEVICE__
-inline __RETURN_TYPE isnan(double __x) { return __ocml_isnan_f64(__x); }
-__DEVICE__
-inline double j0(double __x) { return __ocml_j0_f64(__x); }
-__DEVICE__
-inline double j1(double __x) { return __ocml_j1_f64(__x); }
-__DEVICE__
-inline double jn(int __n,
-                 double __x) { // TODO: we could use Ahmes multiplication
-                               // and the Miller & Brown algorithm
-  //       for linear recurrences to get O(log n) steps, but it's unclear if
-  //       it'd be beneficial in this case. Placeholder until OCML adds
-  //       support.
-  if (__n == 0)
-    return j0(__x);
-  if (__n == 1)
-    return j1(__x);
-
-  double __x0 = j0(__x);
-  double __x1 = j1(__x);
-  for (int __i = 1; __i < __n; ++__i) {
-    double __x2 = (2 * __i) / __x * __x1 - __x0;
-    __x0 = __x1;
-    __x1 = __x2;
-  }
-
-  return __x1;
-}
-__DEVICE__
-inline double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
-__DEVICE__
-inline double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
-__DEVICE__
-inline long long int llrint(double __x) { return __ocml_rint_f64(__x); }
-__DEVICE__
-inline long long int llround(double __x) { return __ocml_round_f64(__x); }
-__DEVICE__
-inline double log(double __x) { return __ocml_log_f64(__x); }
-__DEVICE__
-inline double log10(double __x) { return __ocml_log10_f64(__x); }
-__DEVICE__
-inline double log1p(double __x) { return __ocml_log1p_f64(__x); }
-__DEVICE__
-inline double log2(double __x) { return __ocml_log2_f64(__x); }
-__DEVICE__
-inline double logb(double __x) { return __ocml_logb_f64(__x); }
-__DEVICE__
-inline long int lrint(double __x) { return __ocml_rint_f64(__x); }
-__DEVICE__
-inline long int lround(double __x) { return __ocml_round_f64(__x); }
-__DEVICE__
-inline double modf(double __x, double *__iptr) {
-  double __tmp;
-  double __r =
-      __ocml_modf_f64(__x, (__attribute__((address_space(5))) double *)&__tmp);
-  *__iptr = __tmp;
-
-  return __r;
-}
-__DEVICE__
-inline double nan(const char *__tagp) {
-#if !_WIN32
-  union {
-    double val;
-    struct ieee_double {
-      uint64_t mantissa : 51;
-      uint32_t quiet : 1;
-      uint32_t exponent : 11;
-      uint32_t sign : 1;
-    } bits;
-    static_assert(sizeof(double) == sizeof(ieee_double), "");
-  } __tmp;
-
-  __tmp.bits.sign = 0u;
-  __tmp.bits.exponent = ~0u;
-  __tmp.bits.quiet = 1u;
-  __tmp.bits.mantissa = __make_mantissa(__tagp);
-
-  return __tmp.val;
-#else
-  static_assert(sizeof(uint64_t) == sizeof(double));
-  uint64_t val = __make_mantissa(__tagp);
-  val |= 0xFFFull << 51; // 64-bit constant; shifting a 32-bit int by 51 is UB
-  return *reinterpret_cast<double *>(&val);
-#endif
-}
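-
-// Worked example (illustrative): on the bit-field path, nan("0x7") stores
-// sign 0, exponent 0x7FF, quiet bit 1 and payload 7, i.e. the pattern
-// 0x7FF8000000000007. The _WIN32 branch builds the same layout directly by
-// OR-ing 0xFFF (11 exponent bits plus the quiet bit) above bit 51.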
-__DEVICE__
-inline double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
-__DEVICE__
-inline double nextafter(double __x, double __y) {
-  return __ocml_nextafter_f64(__x, __y);
-}
-__DEVICE__
-inline double
-norm(int __dim,
-     const double *__a) { // TODO: placeholder until OCML adds support.
-  double __r = 0;
-  while (__dim--) {
-    __r += __a[0] * __a[0];
-    ++__a;
-  }
-
-  return __ocml_sqrt_f64(__r);
-}
-__DEVICE__
-inline double norm3d(double __x, double __y, double __z) {
-  return __ocml_len3_f64(__x, __y, __z);
-}
-__DEVICE__
-inline double norm4d(double __x, double __y, double __z, double __w) {
-  return __ocml_len4_f64(__x, __y, __z, __w);
-}
-__DEVICE__
-inline double normcdf(double __x) { return __ocml_ncdf_f64(__x); }
-__DEVICE__
-inline double normcdfinv(double __x) { return __ocml_ncdfinv_f64(__x); }
-__DEVICE__
-inline double pow(double __x, double __y) { return __ocml_pow_f64(__x, __y); }
-__DEVICE__
-inline double rcbrt(double __x) { return __ocml_rcbrt_f64(__x); }
-__DEVICE__
-inline double remainder(double __x, double __y) {
-  return __ocml_remainder_f64(__x, __y);
-}
-__DEVICE__
-inline double remquo(double __x, double __y, int *__quo) {
-  int __tmp;
-  double __r = __ocml_remquo_f64(
-      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
-  *__quo = __tmp;
-
-  return __r;
-}
-__DEVICE__
-inline double rhypot(double __x, double __y) {
-  return __ocml_rhypot_f64(__x, __y);
-}
-__DEVICE__
-inline double rint(double __x) { return __ocml_rint_f64(__x); }
-__DEVICE__
-inline double
-rnorm(int __dim,
-      const double *__a) { // TODO: placeholder until OCML adds support.
-  double __r = 0;
-  while (__dim--) {
-    __r += __a[0] * __a[0];
-    ++__a;
-  }
-
-  return __ocml_rsqrt_f64(__r);
-}
-__DEVICE__
-inline double rnorm3d(double __x, double __y, double __z) {
-  return __ocml_rlen3_f64(__x, __y, __z);
-}
-__DEVICE__
-inline double rnorm4d(double __x, double __y, double __z, double __w) {
-  return __ocml_rlen4_f64(__x, __y, __z, __w);
-}
-__DEVICE__
-inline double round(double __x) { return __ocml_round_f64(__x); }
-__DEVICE__
-inline double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
-__DEVICE__
-inline double scalbln(double __x, long int __n) {
-  return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n)
-                         : __ocml_scalb_f64(__x, __n);
-}
-__DEVICE__
-inline double scalbn(double __x, int __n) {
-  return __ocml_scalbn_f64(__x, __n);
-}
-__DEVICE__
-inline __RETURN_TYPE signbit(double __x) { return __ocml_signbit_f64(__x); }
-__DEVICE__
-inline double sin(double __x) { return __ocml_sin_f64(__x); }
-__DEVICE__
-inline void sincos(double __x, double *__sinptr, double *__cosptr) {
-  double __tmp;
-  *__sinptr = __ocml_sincos_f64(
-      __x, (__attribute__((address_space(5))) double *)&__tmp);
-  *__cosptr = __tmp;
-}
-__DEVICE__
-inline void sincospi(double __x, double *__sinptr, double *__cosptr) {
-  double __tmp;
-  *__sinptr = __ocml_sincospi_f64(
-      __x, (__attribute__((address_space(5))) double *)&__tmp);
-  *__cosptr = __tmp;
-}
-__DEVICE__
-inline double sinh(double __x) { return __ocml_sinh_f64(__x); }
-__DEVICE__
-inline double sinpi(double __x) { return __ocml_sinpi_f64(__x); }
-__DEVICE__
-inline double sqrt(double __x) { return __ocml_sqrt_f64(__x); }
-__DEVICE__
-inline double tan(double __x) { return __ocml_tan_f64(__x); }
-__DEVICE__
-inline double tanh(double __x) { return __ocml_tanh_f64(__x); }
-__DEVICE__
-inline double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
-__DEVICE__
-inline double trunc(double __x) { return __ocml_trunc_f64(__x); }
-__DEVICE__
-inline double y0(double __x) { return __ocml_y0_f64(__x); }
-__DEVICE__
-inline double y1(double __x) { return __ocml_y1_f64(__x); }
-__DEVICE__
-inline double yn(int __n,
-                 double __x) { // TODO: we could use Ahmes multiplication
-                               // and the Miller & Brown algorithm
-  //       for linear recurrences to get O(log n) steps, but it's unclear if
-  //       it'd be beneficial in this case. Placeholder until OCML adds
-  //       support.
-  if (__n == 0)
-    return y0(__x);
-  if (__n == 1)
-    return y1(__x);
-
-  double __x0 = y0(__x);
-  double __x1 = y1(__x);
-  for (int __i = 1; __i < __n; ++__i) {
-    double __x2 = (2 * __i) / __x * __x1 - __x0;
-    __x0 = __x1;
-    __x1 = __x2;
-  }
-
-  return __x1;
-}
-
-// BEGIN INTRINSICS
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __dadd_rd(double __x, double __y) {
-  return __ocml_add_rtn_f64(__x, __y);
-}
-#endif
-__DEVICE__
-inline double __dadd_rn(double __x, double __y) { return __x + __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __dadd_ru(double __x, double __y) {
-  return __ocml_add_rtp_f64(__x, __y);
-}
-__DEVICE__
-inline double __dadd_rz(double __x, double __y) {
-  return __ocml_add_rtz_f64(__x, __y);
-}
-__DEVICE__
-inline double __ddiv_rd(double __x, double __y) {
-  return __ocml_div_rtn_f64(__x, __y);
-}
-#endif
-__DEVICE__
-inline double __ddiv_rn(double __x, double __y) { return __x / __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __ddiv_ru(double __x, double __y) {
-  return __ocml_div_rtp_f64(__x, __y);
-}
-__DEVICE__
-inline double __ddiv_rz(double __x, double __y) {
-  return __ocml_div_rtz_f64(__x, __y);
-}
-__DEVICE__
-inline double __dmul_rd(double __x, double __y) {
-  return __ocml_mul_rtn_f64(__x, __y);
-}
-#endif
-__DEVICE__
-inline double __dmul_rn(double __x, double __y) { return __x * __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __dmul_ru(double __x, double __y) {
-  return __ocml_mul_rtp_f64(__x, __y);
-}
-__DEVICE__
-inline double __dmul_rz(double __x, double __y) {
-  return __ocml_mul_rtz_f64(__x, __y);
-}
-__DEVICE__
-inline double __drcp_rd(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
-#endif
-__DEVICE__
-inline double __drcp_rn(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __drcp_ru(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
-__DEVICE__
-inline double __drcp_rz(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
-__DEVICE__
-inline double __dsqrt_rd(double __x) { return __ocml_sqrt_rtn_f64(__x); }
-#endif
-__DEVICE__
-inline double __dsqrt_rn(double __x) { return __ocml_sqrt_f64(__x); }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __dsqrt_ru(double __x) { return __ocml_sqrt_rtp_f64(__x); }
-__DEVICE__
-inline double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); }
-__DEVICE__
-inline double __dsub_rd(double __x, double __y) {
-  return __ocml_sub_rtn_f64(__x, __y);
-}
-#endif
-__DEVICE__
-inline double __dsub_rn(double __x, double __y) { return __x - __y; }
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __dsub_ru(double __x, double __y) {
-  return __ocml_sub_rtp_f64(__x, __y);
-}
-__DEVICE__
-inline double __dsub_rz(double __x, double __y) {
-  return __ocml_sub_rtz_f64(__x, __y);
-}
-__DEVICE__
-inline double __fma_rd(double __x, double __y, double __z) {
-  return __ocml_fma_rtn_f64(__x, __y, __z);
-}
-#endif
-__DEVICE__
-inline double __fma_rn(double __x, double __y, double __z) {
-  return __ocml_fma_f64(__x, __y, __z);
-}
-#if defined OCML_BASIC_ROUNDED_OPERATIONS
-__DEVICE__
-inline double __fma_ru(double __x, double __y, double __z) {
-  return __ocml_fma_rtp_f64(__x, __y, __z);
-}
-__DEVICE__
-inline double __fma_rz(double __x, double __y, double __z) {
-  return __ocml_fma_rtz_f64(__x, __y, __z);
-}
-#endif
-// END INTRINSICS
-// END DOUBLE
-
-// BEGIN INTEGER
-__DEVICE__
-inline int abs(int __x) {
-  int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
-  return (__x ^ __sgn) - __sgn;
-}
-__DEVICE__
-inline long labs(long __x) {
-  long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
-  return (__x ^ __sgn) - __sgn;
-}
-__DEVICE__
-inline long long llabs(long long __x) {
-  long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
-  return (__x ^ __sgn) - __sgn;
-}
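-
-// How the branchless abs above works (illustrative): the arithmetic shift
-// smears the sign bit, so __sgn is 0 for non-negative __x and -1 (all ones)
-// for negative __x; (__x ^ __sgn) - __sgn is then __x, or ~__x + 1 == -__x.
-// E.g. __x == -5: (-5 ^ -1) - (-1) == 4 + 1 == 5.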
-
-#if defined(__cplusplus)
-__DEVICE__
-inline long abs(long __x) { return labs(__x); }
-__DEVICE__
-inline long long abs(long long __x) { return llabs(__x); }
-#endif
-// END INTEGER
-
-__DEVICE__
-inline _Float16 fma(_Float16 __x, _Float16 __y, _Float16 __z) {
-  return __ocml_fma_f16(__x, __y, __z);
-}
-
-__DEVICE__
-inline float fma(float __x, float __y, float __z) {
-  return fmaf(__x, __y, __z);
-}
-
-#pragma push_macro("__DEF_FUN1")
-#pragma push_macro("__DEF_FUN2")
-#pragma push_macro("__DEF_FUNI")
-#pragma push_macro("__DEF_FLOAT_FUN2I")
-#pragma push_macro("__HIP_OVERLOAD1")
-#pragma push_macro("__HIP_OVERLOAD2")
-
-// __hip_enable_if::type is a type function which returns __T if __B is true.
-template <bool __B, class __T = void> struct __hip_enable_if {};
-
-template <class __T> struct __hip_enable_if<true, __T> { typedef __T type; };
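-
-// Illustrative sketch, not part of the original header: __hip_enable_if is
-// a minimal std::enable_if, so an overload can be gated on a compile-time
-// predicate. The helper below is hypothetical; SFINAE removes it from
-// overload resolution whenever the predicate is false, because ::type then
-// does not exist.
-template <typename __T>
-__DEVICE__ typename __hip_enable_if<std::numeric_limits<__T>::is_integer,
-                                    __T>::type
-__example_twice(__T __x) {
-  return __x + __x; // viable only for integer __T
-}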
-
-// __HIP_OVERLOAD1 is used to resolve function calls with integer argument to
-// avoid compilation error due to ambiguity, e.g. floor(5) is resolved with
-// floor(double).
-#define __HIP_OVERLOAD1(__retty, __fn)                                         \
-  template <typename __T>                                                      \
-  __DEVICE__ typename __hip_enable_if<std::numeric_limits<__T>::is_integer,    \
-                                      __retty>::type                           \
-  __fn(__T __x) {                                                              \
-    return ::__fn((double)__x);                                                \
-  }
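-
-// For instance, __HIP_OVERLOAD1(double, floor) expands to a template that
-// accepts any integer type, casts the argument to double, and forwards to
-// ::floor(double); this is how floor(5) compiles unambiguously.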
-
-// __HIP_OVERLOAD2 is used to resolve function calls with mixed float/double
-// or integer argument to avoid compilation error due to ambiguity, e.g.
-// max(5.0f, 6.0) is resolved with max(double, double).
-#define __HIP_OVERLOAD2(__retty, __fn)                                         \
-  template <typename __T1, typename __T2>                                      \
-  __DEVICE__                                                                   \
-      typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&    \
-                                   std::numeric_limits<__T2>::is_specialized,  \
-                               __retty>::type                                  \
-      __fn(__T1 __x, __T2 __y) {                                               \
-    return __fn((double)__x, (double)__y);                                     \
-  }
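-
-// For example, with __HIP_OVERLOAD2(double, max) a mixed call such as
-// max(5.0f, 6.0) deduces __T1 = float and __T2 = double, both of which have
-// specialized numeric_limits, so it forwards to max((double)5.0f,
-// (double)6.0) instead of being ambiguous.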
-
-// Define cmath functions that take a float argument and return float.
-#define __DEF_FUN1(__retty, __func)                                            \
-  __DEVICE__                                                                   \
-  inline float __func(float __x) { return __func##f(__x); }                    \
-  __HIP_OVERLOAD1(__retty, __func)
-
-// Define cmath functions that take a float argument and return __retty.
-#define __DEF_FUNI(__retty, __func)                                            \
-  __DEVICE__                                                                   \
-  inline __retty __func(float __x) { return __func##f(__x); }                  \
-  __HIP_OVERLOAD1(__retty, __func)
-
-// Define cmath functions that take two float arguments.
-#define __DEF_FUN2(__retty, __func)                                            \
-  __DEVICE__                                                                   \
-  inline float __func(float __x, float __y) { return __func##f(__x, __y); }    \
-  __HIP_OVERLOAD2(__retty, __func)
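-
-// For example, __DEF_FUN1(double, sin) defines float sin(float) forwarding
-// to sinf, plus (via __HIP_OVERLOAD1) a double sin(__T) overload for integer
-// argument types; __DEF_FUN2 does the same for the two-argument functions.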
-
-__DEF_FUN1(double, acos)
-__DEF_FUN1(double, acosh)
-__DEF_FUN1(double, asin)
-__DEF_FUN1(double, asinh)
-__DEF_FUN1(double, atan)
-__DEF_FUN2(double, atan2);
-__DEF_FUN1(double, atanh)
-__DEF_FUN1(double, cbrt)
-__DEF_FUN1(double, ceil)
-__DEF_FUN2(double, copysign);
-__DEF_FUN1(double, cos)
-__DEF_FUN1(double, cosh)
-__DEF_FUN1(double, erf)
-__DEF_FUN1(double, erfc)
-__DEF_FUN1(double, exp)
-__DEF_FUN1(double, exp2)
-__DEF_FUN1(double, expm1)
-__DEF_FUN1(double, fabs)
-__DEF_FUN2(double, fdim);
-__DEF_FUN1(double, floor)
-__DEF_FUN2(double, fmax);
-__DEF_FUN2(double, fmin);
-__DEF_FUN2(double, fmod);
-//__HIP_OVERLOAD1(int, fpclassify)
-__DEF_FUN2(double, hypot);
-__DEF_FUNI(int, ilogb)
-__HIP_OVERLOAD1(bool, isfinite)
-__HIP_OVERLOAD2(bool, isgreater);
-__HIP_OVERLOAD2(bool, isgreaterequal);
-__HIP_OVERLOAD1(bool, isinf);
-__HIP_OVERLOAD2(bool, isless);
-__HIP_OVERLOAD2(bool, islessequal);
-__HIP_OVERLOAD2(bool, islessgreater);
-__HIP_OVERLOAD1(bool, isnan);
-//__HIP_OVERLOAD1(bool, isnormal)
-__HIP_OVERLOAD2(bool, isunordered);
-__DEF_FUN1(double, lgamma)
-__DEF_FUN1(double, log)
-__DEF_FUN1(double, log10)
-__DEF_FUN1(double, log1p)
-__DEF_FUN1(double, log2)
-__DEF_FUN1(double, logb)
-__DEF_FUNI(long long, llrint)
-__DEF_FUNI(long long, llround)
-__DEF_FUNI(long, lrint)
-__DEF_FUNI(long, lround)
-__DEF_FUN1(double, nearbyint);
-__DEF_FUN2(double, nextafter);
-__DEF_FUN2(double, pow);
-__DEF_FUN2(double, remainder);
-__DEF_FUN1(double, rint);
-__DEF_FUN1(double, round);
-__HIP_OVERLOAD1(bool, signbit)
-__DEF_FUN1(double, sin)
-__DEF_FUN1(double, sinh)
-__DEF_FUN1(double, sqrt)
-__DEF_FUN1(double, tan)
-__DEF_FUN1(double, tanh)
-__DEF_FUN1(double, tgamma)
-__DEF_FUN1(double, trunc);
-
-// Define cmath functions that take a float and an integer argument.
-#define __DEF_FLOAT_FUN2I(__func)                                              \
-  __DEVICE__                                                                   \
-  inline float __func(float __x, int __y) { return __func##f(__x, __y); }
-__DEF_FLOAT_FUN2I(scalbn)
-
-template <class T> __DEVICE__ inline T min(T __arg1, T __arg2) {
-  return (__arg1 < __arg2) ? __arg1 : __arg2;
-}
-
-template <class T> __DEVICE__ inline T max(T __arg1, T __arg2) {
-  return (__arg1 > __arg2) ? __arg1 : __arg2;
-}
-
-__DEVICE__ inline int min(int __arg1, int __arg2) {
-  return (__arg1 < __arg2) ? __arg1 : __arg2;
-}
-__DEVICE__ inline int max(int __arg1, int __arg2) {
-  return (__arg1 > __arg2) ? __arg1 : __arg2;
-}
-
-__DEVICE__
-inline float max(float __x, float __y) { return fmaxf(__x, __y); }
-
-__DEVICE__
-inline double max(double __x, double __y) { return fmax(__x, __y); }
-
-__DEVICE__
-inline float min(float __x, float __y) { return fminf(__x, __y); }
-
-__DEVICE__
-inline double min(double __x, double __y) { return fmin(__x, __y); }
-
-__HIP_OVERLOAD2(double, max)
-__HIP_OVERLOAD2(double, min)
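-
-// A mixed-type call such as max(1, 2.0f) matches neither the max<T>(T, T)
-// template (deduction fails) nor any exact overload above, so it resolves
-// through __HIP_OVERLOAD2 to max(double, double).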
-
-__host__ inline static int min(int __arg1, int __arg2) {
-  return std::min(__arg1, __arg2);
-}
-
-__host__ inline static int max(int __arg1, int __arg2) {
-  return std::max(__arg1, __arg2);
-}
-
-#pragma pop_macro("__DEF_FUN1")
-#pragma pop_macro("__DEF_FUN2")
-#pragma pop_macro("__DEF_FUNI")
-#pragma pop_macro("__DEF_FLOAT_FUN2I")
-#pragma pop_macro("__HIP_OVERLOAD1")
-#pragma pop_macro("__HIP_OVERLOAD2")
-#pragma pop_macro("__DEVICE__")
-#pragma pop_macro("__RETURN_TYPE")
-
-#endif // __CLANG_HIP_MATH_H__
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_hip_runtime_wrapper.h b/linux-x86/lib64/clang/11.0.5/include/__clang_hip_runtime_wrapper.h
deleted file mode 100644
index addae56..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/__clang_hip_runtime_wrapper.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*===---- __clang_hip_runtime_wrapper.h - HIP runtime support ---------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-/*
- * WARNING: This header is intended to be directly -include'd by
- * the compiler and is not supposed to be included by users.
- *
- */
-
-#ifndef __CLANG_HIP_RUNTIME_WRAPPER_H__
-#define __CLANG_HIP_RUNTIME_WRAPPER_H__
-
-#if __HIP__
-
-#include <cmath>
-#include <cstdlib>
-#include <stdlib.h>
-
-#define __host__ __attribute__((host))
-#define __device__ __attribute__((device))
-#define __global__ __attribute__((global))
-#define __shared__ __attribute__((shared))
-#define __constant__ __attribute__((constant))
-
-#if __HIP_ENABLE_DEVICE_MALLOC__
-extern "C" __device__ void *__hip_malloc(size_t __size);
-extern "C" __device__ void *__hip_free(void *__ptr);
-static inline __device__ void *malloc(size_t __size) {
-  return __hip_malloc(__size);
-}
-static inline __device__ void *free(void *__ptr) { return __hip_free(__ptr); }
-#else
-static inline __device__ void *malloc(size_t __size) {
-  __builtin_trap();
-  return nullptr;
-}
-static inline __device__ void *free(void *__ptr) {
-  __builtin_trap();
-  return nullptr;
-}
-#endif
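-
-// In short: with __HIP_ENABLE_DEVICE_MALLOC__ set, device-side malloc/free
-// forward to the __hip_malloc/__hip_free runtime entry points; otherwise any
-// device-side call to malloc or free traps at run time via __builtin_trap().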
-
-#include <__clang_hip_libdevice_declares.h>
-#include <__clang_hip_math.h>
-
-#if !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
-#include <__clang_cuda_math_forward_declares.h>
-#include <__clang_cuda_complex_builtins.h>
-
-#include <algorithm>
-#include <complex>
-#include <new>
-#endif // !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
-
-#define __CLANG_HIP_RUNTIME_WRAPPER_INCLUDED__ 1
-
-#endif // __HIP__
-#endif // __CLANG_HIP_RUNTIME_WRAPPER_H__
diff --git a/linux-x86/lib64/clang/11.0.5/include/altivec.h b/linux-x86/lib64/clang/11.0.5/include/altivec.h
deleted file mode 100644
index 9a40092..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/altivec.h
+++ /dev/null
@@ -1,17154 +0,0 @@
-/*===---- altivec.h - Standard header for type generic math ---------------===*\
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
-\*===----------------------------------------------------------------------===*/
-
-#ifndef __ALTIVEC_H
-#define __ALTIVEC_H
-
-#ifndef __ALTIVEC__
-#error "AltiVec support not enabled"
-#endif
-
-/* Constants for mapping CR6 bits to predicate result. */
-
-#define __CR6_EQ 0
-#define __CR6_EQ_REV 1
-#define __CR6_LT 2
-#define __CR6_LT_REV 3
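-
-/* These constants select which CR6 bit a vector-compare predicate builtin
-   tests; for instance, the vec_all_* predicates defined later in this header
-   pass __CR6_LT (all elements compared true), while the *_REV constants
-   request the inverted sense of the corresponding bit.  */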
-
-/* Constants for vec_test_data_class */
-#define __VEC_CLASS_FP_SUBNORMAL_N (1 << 0)
-#define __VEC_CLASS_FP_SUBNORMAL_P (1 << 1)
-#define __VEC_CLASS_FP_SUBNORMAL (__VEC_CLASS_FP_SUBNORMAL_P | \
-                                  __VEC_CLASS_FP_SUBNORMAL_N)
-#define __VEC_CLASS_FP_ZERO_N (1<<2)
-#define __VEC_CLASS_FP_ZERO_P (1<<3)
-#define __VEC_CLASS_FP_ZERO (__VEC_CLASS_FP_ZERO_P           | \
-                             __VEC_CLASS_FP_ZERO_N)
-#define __VEC_CLASS_FP_INFINITY_N (1<<4)
-#define __VEC_CLASS_FP_INFINITY_P (1<<5)
-#define __VEC_CLASS_FP_INFINITY (__VEC_CLASS_FP_INFINITY_P   | \
-                                 __VEC_CLASS_FP_INFINITY_N)
-#define __VEC_CLASS_FP_NAN (1<<6)
-#define __VEC_CLASS_FP_NOT_NORMAL (__VEC_CLASS_FP_NAN        | \
-                                   __VEC_CLASS_FP_SUBNORMAL  | \
-                                   __VEC_CLASS_FP_ZERO       | \
-                                   __VEC_CLASS_FP_INFINITY)
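-
-/* These bits form the mask argument of vec_test_data_class (a POWER9
-   facility defined later in this header); e.g. a mask of
-   __VEC_CLASS_FP_NAN | __VEC_CLASS_FP_INFINITY selects the lanes holding a
-   NaN or an infinity.  */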
-
-#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
-
-#ifdef __POWER9_VECTOR__
-#include <stddef.h>
-#endif
-
-static __inline__ vector signed char __ATTRS_o_ai vec_perm(
-    vector signed char __a, vector signed char __b, vector unsigned char __c);
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_perm(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned char __c);
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c);
-
-static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a,
-                                                     vector signed short __b,
-                                                     vector unsigned char __c);
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_perm(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned char __c);
-
-static __inline__ vector bool short __ATTRS_o_ai vec_perm(
-    vector bool short __a, vector bool short __b, vector unsigned char __c);
-
-static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a,
-                                                     vector pixel __b,
-                                                     vector unsigned char __c);
-
-static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a,
-                                                   vector signed int __b,
-                                                   vector unsigned char __c);
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_perm(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned char __c);
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c);
-
-static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a,
-                                                     vector float __b,
-                                                     vector unsigned char __c);
-
-#ifdef __VSX__
-static __inline__ vector long long __ATTRS_o_ai
-vec_perm(vector signed long long __a, vector signed long long __b,
-         vector unsigned char __c);
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_perm(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned char __c);
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_perm(vector bool long long __a, vector bool long long __b,
-         vector unsigned char __c);
-
-static __inline__ vector double __ATTRS_o_ai vec_perm(vector double __a,
-                                                      vector double __b,
-                                                      vector unsigned char __c);
-#endif
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector unsigned char __a, vector unsigned char __b);
-
-/* vec_abs */
-
-#define __builtin_altivec_abs_v16qi vec_abs
-#define __builtin_altivec_abs_v8hi vec_abs
-#define __builtin_altivec_abs_v4si vec_abs
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_abs(vector signed char __a) {
-  return __builtin_altivec_vmaxsb(__a, -__a);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_abs(vector signed short __a) {
-  return __builtin_altivec_vmaxsh(__a, -__a);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_abs(vector signed int __a) {
-  return __builtin_altivec_vmaxsw(__a, -__a);
-}
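-
-/* max(__a, -__a) yields |__a| per element.  As with the scalar abs family,
-   the most negative value maps to itself, since its positive counterpart is
-   not representable.  */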
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_abs(vector signed long long __a) {
-  return __builtin_altivec_vmaxsd(__a, -__a);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_abs(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvabssp(__a);
-#else
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)(0x7FFFFFFF);
-  return (vector float)__res;
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_abs(vector double __a) {
-  return __builtin_vsx_xvabsdp(__a);
-}
-#endif
-
-/* vec_abss */
-#define __builtin_altivec_abss_v16qi vec_abss
-#define __builtin_altivec_abss_v8hi vec_abss
-#define __builtin_altivec_abss_v4si vec_abss
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_abss(vector signed char __a) {
-  return __builtin_altivec_vmaxsb(
-      __a, __builtin_altivec_vsubsbs((vector signed char)(0), __a));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_abss(vector signed short __a) {
-  return __builtin_altivec_vmaxsh(
-      __a, __builtin_altivec_vsubshs((vector signed short)(0), __a));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_abss(vector signed int __a) {
-  return __builtin_altivec_vmaxsw(
-      __a, __builtin_altivec_vsubsws((vector signed int)(0), __a));
-}
-
-/* vec_absd */
-#if defined(__POWER9_VECTOR__)
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_absd(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vabsdub(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_absd(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vabsduh(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_absd(vector unsigned int __a,  vector unsigned int __b) {
-  return __builtin_altivec_vabsduw(__a, __b);
-}
-
-#endif /* End __POWER9_VECTOR__ */
-
-/* vec_add */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_add(vector signed char __a, vector signed char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_add(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_add(vector signed char __a, vector bool char __b) {
-  return __a + (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_add(vector unsigned char __a, vector unsigned char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_add(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_add(vector unsigned char __a, vector bool char __b) {
-  return __a + (vector unsigned char)__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a,
-                                                    vector short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_add(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_add(vector short __a,
-                                                    vector bool short __b) {
-  return __a + (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_add(vector unsigned short __a, vector unsigned short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_add(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_add(vector unsigned short __a, vector bool short __b) {
-  return __a + (vector unsigned short)__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a,
-                                                  vector int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_add(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_add(vector int __a,
-                                                  vector bool int __b) {
-  return __a + (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_add(vector unsigned int __a, vector unsigned int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_add(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_add(vector unsigned int __a, vector bool int __b) {
-  return __a + (vector unsigned int)__b;
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_add(vector signed long long __a, vector signed long long __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_add(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a + __b;
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_add(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_add(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a + __b;
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-static __inline__ vector float __ATTRS_o_ai vec_add(vector float __a,
-                                                    vector float __b) {
-  return __a + __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_add(vector double __a,
-                                                     vector double __b) {
-  return __a + __b;
-}
-#endif // __VSX__
-
-/* vec_adde */
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_adde(vector signed __int128 __a, vector signed __int128 __b,
-         vector signed __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b,
-         vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-#endif
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_adde(vector signed int __a, vector signed int __b,
-         vector signed int __c) {
-  vector signed int __mask = {1, 1, 1, 1};
-  vector signed int __carry = __c & __mask;
-  return vec_add(vec_add(__a, __b), __carry);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adde(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  vector unsigned int __mask = {1, 1, 1, 1};
-  vector unsigned int __carry = __c & __mask;
-  return vec_add(vec_add(__a, __b), __carry);
-}
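-
-/* vec_adde performs an element-wise add with carry-in: only bit 0 of each
-   element of __c is consumed, so {1, 2, 3, 4} + {1, 1, 1, 1} with carries
-   {1, 0, 1, 0} yields {3, 3, 5, 5}.  */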
-
-/* vec_addec */
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_addec(vector signed __int128 __a, vector signed __int128 __b,
-          vector signed __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b,
-          vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_addec(vector signed int __a, vector signed int __b,
-          vector signed int __c) {
-
-  signed int __result[4];
-  for (int i = 0; i < 4; i++) {
-    unsigned int __tempa = (unsigned int) __a[i];
-    unsigned int __tempb = (unsigned int) __b[i];
-    unsigned int __tempc = (unsigned int) __c[i];
-    __tempc = __tempc & 0x00000001;
-    unsigned long long __longa = (unsigned long long) __tempa;
-    unsigned long long __longb = (unsigned long long) __tempb;
-    unsigned long long __longc = (unsigned long long) __tempc;
-    unsigned long long __sum = __longa + __longb + __longc;
-    unsigned long long __res = (__sum >> 32) & 0x01;
-    unsigned long long __tempres = (unsigned int) __res;
-    __result[i] = (signed int) __tempres;
-  }
-
-  vector signed int ret = { __result[0], __result[1], __result[2], __result[3] };
-  return ret;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_addec(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-
-  unsigned int __result[4];
-  for (int i = 0; i < 4; i++) {
-    unsigned int __tempc = __c[i] & 1;
-    unsigned long long __longa = (unsigned long long) __a[i];
-    unsigned long long __longb = (unsigned long long) __b[i];
-    unsigned long long __longc = (unsigned long long) __tempc;
-    unsigned long long __sum = __longa + __longb + __longc;
-    unsigned long long __res = (__sum >> 32) & 0x01;
-    unsigned long long __tempres = (unsigned int) __res;
-    __result[i] = (unsigned int) __tempres;
-  }
-
-  vector unsigned int ret = { __result[0], __result[1], __result[2], __result[3] };
-  return ret;
-}
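-
-/* The scalar loops above compute the element-wise carry-out of an
-   add-with-carry: each 32-bit lane is widened to 64 bits, summed with the
-   carry-in bit from __c, and bit 32 of the sum (0 or 1) is the result.  For
-   example, a lane of 0xFFFFFFFF plus 1 with carry-in 0 yields 1.  */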
-
-#endif
-
-/* vec_vaddubm */
-
-#define __builtin_altivec_vaddubm vec_vaddubm
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddubm(vector signed char __a, vector signed char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddubm(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a + __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddubm(vector signed char __a, vector bool char __b) {
-  return __a + (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubm(vector unsigned char __a, vector unsigned char __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubm(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a + __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubm(vector unsigned char __a, vector bool char __b) {
-  return __a + (vector unsigned char)__b;
-}
-
-/* vec_vadduhm */
-
-#define __builtin_altivec_vadduhm vec_vadduhm
-
-static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
-                                                        vector short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector bool short __a,
-                                                        vector short __b) {
-  return (vector short)__a + __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vadduhm(vector short __a,
-                                                        vector bool short __b) {
-  return __a + (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhm(vector unsigned short __a, vector unsigned short __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhm(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a + __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhm(vector unsigned short __a, vector bool short __b) {
-  return __a + (vector unsigned short)__b;
-}
-
-/* vec_vadduwm */
-
-#define __builtin_altivec_vadduwm vec_vadduwm
-
-static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a,
-                                                      vector int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector bool int __a,
-                                                      vector int __b) {
-  return (vector int)__a + __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vadduwm(vector int __a,
-                                                      vector bool int __b) {
-  return __a + (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduwm(vector unsigned int __a, vector unsigned int __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduwm(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a + __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduwm(vector unsigned int __a, vector bool int __b) {
-  return __a + (vector unsigned int)__b;
-}
-
-/* vec_vaddfp */
-
-#define __builtin_altivec_vaddfp vec_vaddfp
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vaddfp(vector float __a, vector float __b) {
-  return __a + __b;
-}
-
-/* vec_addc */
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_addc(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_altivec_vaddcuw((vector unsigned int)__a,
-                                                      (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_addc(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vaddcuw(__a, __b);
-}
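-
-/* vec_addc returns the element-wise carry-out of an unsigned add: a lane of
-   0xFFFFFFFF added to 2 wraps around, producing a result lane of 1, while
-   lanes that do not overflow produce 0.  */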
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_addc(vector signed __int128 __a, vector signed __int128 __b) {
-  return (vector signed __int128)__builtin_altivec_vaddcuq(
-      (vector unsigned __int128)__a, (vector unsigned __int128)__b);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_addc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vaddcuq(__a, __b);
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-/* vec_vaddcuw */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vaddcuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vaddcuw(__a, __b);
-}
-
-/* vec_adds */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_adds(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_adds(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_adds(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_adds(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_adds(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_adds(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_adds(vector short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vaddshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_adds(vector bool short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vaddshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_adds(vector short __a,
-                                                     vector bool short __b) {
-  return __builtin_altivec_vaddshs(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_adds(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_adds(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_adds(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vaddsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_adds(vector bool int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vaddsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_adds(vector int __a,
-                                                   vector bool int __b) {
-  return __builtin_altivec_vaddsws(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adds(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adds(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_adds(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
-}
-
-/* vec_vaddsbs */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddsbs(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddsbs(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vaddsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vaddsbs(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vaddsbs(__a, (vector signed char)__b);
-}
-
-/* vec_vaddubs */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubs(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubs(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vaddubs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vaddubs(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vaddubs(__a, (vector unsigned char)__b);
-}
-
-/* vec_vaddshs */
-
-static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vaddshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector bool short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vaddshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vaddshs(vector short __a,
-                                                        vector bool short __b) {
-  return __builtin_altivec_vaddshs(__a, (vector short)__b);
-}
-
-/* vec_vadduhs */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhs(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhs(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vadduhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vadduhs(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vadduhs(__a, (vector unsigned short)__b);
-}
-
-/* vec_vaddsws */
-
-static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vaddsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector bool int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vaddsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vaddsws(vector int __a,
-                                                      vector bool int __b) {
-  return __builtin_altivec_vaddsws(__a, (vector int)__b);
-}
-
-/* vec_vadduws */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduws(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduws(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vadduws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vadduws(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-/* vec_vadduqm */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vadduqm(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a + __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vadduqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a + __b;
-}
-
-/* vec_vaddeuqm */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vaddeuqm(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vaddeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddeuqm(__a, __b, __c);
-}
-
-/* vec_vaddcuq */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vaddcuq(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vaddcuq(__a, __b);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vaddcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vaddcuq(__a, __b);
-}
-
-/* vec_vaddecuq */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vaddecuq(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vaddecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vaddecuq(__a, __b, __c);
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-/* vec_and */
-
-#define __builtin_altivec_vand vec_and
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_and(vector signed char __a, vector signed char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_and(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_and(vector signed char __a, vector bool char __b) {
-  return __a & (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_and(vector unsigned char __a, vector unsigned char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_and(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_and(vector unsigned char __a, vector bool char __b) {
-  return __a & (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_and(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a,
-                                                    vector short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_and(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_and(vector short __a,
-                                                    vector bool short __b) {
-  return __a & (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_and(vector unsigned short __a, vector unsigned short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_and(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_and(vector unsigned short __a, vector bool short __b) {
-  return __a & (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_and(vector bool short __a, vector bool short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a,
-                                                  vector int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_and(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_and(vector int __a,
-                                                  vector bool int __b) {
-  return __a & (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_and(vector unsigned int __a, vector unsigned int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_and(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_and(vector unsigned int __a, vector bool int __b) {
-  return __a & (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_and(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_and(vector bool int __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_and(vector float __a,
-                                                    vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_and(vector bool long long __a,
-                                                     vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & (vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_and(vector double __a, vector bool long long __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & (vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_and(vector double __a,
-                                                     vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & (vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_and(vector signed long long __a, vector signed long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_and(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_and(vector signed long long __a, vector bool long long __b) {
-  return __a & (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_and(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_and(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_and(vector unsigned long long __a, vector bool long long __b) {
-  return __a & (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_and(vector bool long long __a, vector bool long long __b) {
-  return __a & __b;
-}
-#endif
-
-/* vec_vand */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vand(vector signed char __a, vector signed char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vand(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vand(vector signed char __a, vector bool char __b) {
-  return __a & (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vand(vector unsigned char __a, vector unsigned char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vand(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vand(vector unsigned char __a, vector bool char __b) {
-  return __a & (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vand(vector bool char __a,
-                                                         vector bool char __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a,
-                                                     vector short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vand(vector bool short __a,
-                                                     vector short __b) {
-  return (vector short)__a & __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vand(vector short __a,
-                                                     vector bool short __b) {
-  return __a & (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vand(vector unsigned short __a, vector unsigned short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vand(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vand(vector unsigned short __a, vector bool short __b) {
-  return __a & (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vand(vector bool short __a, vector bool short __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a,
-                                                   vector int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vand(vector bool int __a,
-                                                   vector int __b) {
-  return (vector int)__a & __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vand(vector int __a,
-                                                   vector bool int __b) {
-  return __a & (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vand(vector unsigned int __a, vector unsigned int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vand(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vand(vector unsigned int __a, vector bool int __b) {
-  return __a & (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vand(vector bool int __a,
-                                                        vector bool int __b) {
-  return __a & __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vand(vector bool int __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vand(vector float __a,
-                                                     vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vand(vector signed long long __a, vector signed long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vand(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vand(vector signed long long __a, vector bool long long __b) {
-  return __a & (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vand(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vand(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vand(vector unsigned long long __a, vector bool long long __b) {
-  return __a & (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vand(vector bool long long __a, vector bool long long __b) {
-  return __a & __b;
-}
-#endif
-
-/* vec_andc */
-
-#define __builtin_altivec_vandc vec_andc
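-
-/* vec_andc computes __a & ~__b, clearing in __a every bit that is set in
-   __b; e.g. and-ing a float vector with the complement of a per-lane
-   0x80000000 mask clears the sign bits, the same effect as the masking in
-   the non-VSX vec_abs above.  */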
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_andc(vector signed char __a, vector signed char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_andc(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_andc(vector signed char __a, vector bool char __b) {
-  return __a & ~(vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_andc(vector unsigned char __a, vector unsigned char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_andc(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_andc(vector unsigned char __a, vector bool char __b) {
-  return __a & ~(vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_andc(vector bool char __a,
-                                                         vector bool char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a,
-                                                     vector short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_andc(vector bool short __a,
-                                                     vector short __b) {
-  return (vector short)__a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_andc(vector short __a,
-                                                     vector bool short __b) {
-  return __a & ~(vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_andc(vector unsigned short __a, vector unsigned short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_andc(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_andc(vector unsigned short __a, vector bool short __b) {
-  return __a & ~(vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_andc(vector bool short __a, vector bool short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a,
-                                                   vector int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_andc(vector bool int __a,
-                                                   vector int __b) {
-  return (vector int)__a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_andc(vector int __a,
-                                                   vector bool int __b) {
-  return __a & ~(vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_andc(vector unsigned int __a, vector unsigned int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_andc(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_andc(vector unsigned int __a, vector bool int __b) {
-  return __a & ~(vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_andc(vector bool int __a,
-                                                        vector bool int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_andc(vector bool int __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_andc(vector float __a,
-                                                     vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_andc(vector bool long long __a,
-                                                      vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & ~(vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_andc(vector double __a, vector bool long long __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & ~(vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_andc(vector double __a,
-                                                      vector double __b) {
-  vector unsigned long long __res =
-      (vector unsigned long long)__a & ~(vector unsigned long long)__b;
-  return (vector double)__res;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_andc(vector signed long long __a, vector signed long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_andc(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_andc(vector signed long long __a, vector bool long long __b) {
-  return __a & ~(vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_andc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_andc(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_andc(vector unsigned long long __a, vector bool long long __b) {
-  return __a & ~(vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_andc(vector bool long long __a, vector bool long long __b) {
-  return __a & ~__b;
-}
-#endif
-
-/* vec_vandc */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vandc(vector signed char __a, vector signed char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vandc(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a & ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vandc(vector signed char __a, vector bool char __b) {
-  return __a & ~(vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vandc(vector unsigned char __a, vector unsigned char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vandc(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a & ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vandc(vector unsigned char __a, vector bool char __b) {
-  return __a & ~(vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vandc(vector bool char __a, vector bool char __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a,
-                                                      vector short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vandc(vector bool short __a,
-                                                      vector short __b) {
-  return (vector short)__a & ~__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vandc(vector short __a,
-                                                      vector bool short __b) {
-  return __a & ~(vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vandc(vector unsigned short __a, vector unsigned short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vandc(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a & ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vandc(vector unsigned short __a, vector bool short __b) {
-  return __a & ~(vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vandc(vector bool short __a, vector bool short __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int __a,
-                                                    vector int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vandc(vector bool int __a,
-                                                    vector int __b) {
-  return (vector int)__a & ~__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vandc(vector int __a,
-                                                    vector bool int __b) {
-  return __a & ~(vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vandc(vector unsigned int __a, vector unsigned int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vandc(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a & ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vandc(vector unsigned int __a, vector bool int __b) {
-  return __a & ~(vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vandc(vector bool int __a,
-                                                         vector bool int __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a,
-                                                      vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vandc(vector bool int __a,
-                                                      vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vandc(vector float __a,
-                                                      vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a & ~(vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vandc(vector signed long long __a, vector signed long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vandc(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a & ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vandc(vector signed long long __a, vector bool long long __b) {
-  return __a & ~(vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vandc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vandc(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a & ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vandc(vector unsigned long long __a, vector bool long long __b) {
-  return __a & ~(vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vandc(vector bool long long __a, vector bool long long __b) {
-  return __a & ~__b;
-}
-#endif
-
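-/* Note: vec_vandc is the instruction-named form of vec_andc; every overload
-   above computes __a & ~__b, with bool-vector operands cast to the result
-   type. */
-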
-/* vec_avg */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_avg(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vavgsb(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_avg(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vavgub(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_avg(vector short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vavgsh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_avg(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vavguh(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_avg(vector int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vavgsw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_avg(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vavguw(__a, __b);
-}
-
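-/* Illustrative usage sketch (an addition for exposition, not part of the
-   upstream header): vec_avg computes the element-wise rounded average
-   (__a + __b + 1) >> 1 in a widened intermediate, so the addition cannot
-   overflow. */
-static __inline__ vector unsigned char __ATTRS_o_ai
-__example_avg_u8(vector unsigned char __x, vector unsigned char __y) {
-  /* e.g. elements 250 and 253 average to (250 + 253 + 1) / 2 = 252 */
-  return vec_avg(__x, __y);
-}
-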
-/* vec_vavgsb */
-
-static __inline__ vector signed char __attribute__((__always_inline__))
-vec_vavgsb(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vavgsb(__a, __b);
-}
-
-/* vec_vavgub */
-
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_vavgub(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vavgub(__a, __b);
-}
-
-/* vec_vavgsh */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vavgsh(vector short __a, vector short __b) {
-  return __builtin_altivec_vavgsh(__a, __b);
-}
-
-/* vec_vavguh */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vavguh(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vavguh(__a, __b);
-}
-
-/* vec_vavgsw */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vavgsw(vector int __a, vector int __b) {
-  return __builtin_altivec_vavgsw(__a, __b);
-}
-
-/* vec_vavguw */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vavguw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vavguw(__a, __b);
-}
-
-/* vec_ceil */
-
-static __inline__ vector float __ATTRS_o_ai vec_ceil(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrspip(__a);
-#else
-  return __builtin_altivec_vrfip(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_ceil(vector double __a) {
-  return __builtin_vsx_xvrdpip(__a);
-}
-#endif
-
-/* vec_vrfip */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfip(vector float __a) {
-  return __builtin_altivec_vrfip(__a);
-}
-
-/* vec_cmpb */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_cmpb(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp(__a, __b);
-}
-
-/* vec_vcmpbfp */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vcmpbfp(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp(__a, __b);
-}
-
-/* vec_cmpeq */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpeq(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpeq(vector bool char __a, vector bool char __b) {
-  return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_cmpeq(vector short __a,
-                                                           vector short __b) {
-  return (vector bool short)__builtin_altivec_vcmpequh(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpeq(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpeq(vector bool short __a, vector bool short __b) {
-  return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector int __a,
-                                                         vector int __b) {
-  return (vector bool int)__builtin_altivec_vcmpequw(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpeq(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector bool int __a,
-                                                         vector bool int __b) {
-  return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a,
-                                                     (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpequd(__a, __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpequd(
-      (vector long long)__a, (vector long long)__b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpequd(
-      (vector long long)__a, (vector long long)__b);
-}
-
-#endif
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a,
-                                                         vector float __b) {
-#ifdef __VSX__
-  return (vector bool int)__builtin_vsx_xvcmpeqsp(__a, __b);
-#else
-  return (vector bool int)__builtin_altivec_vcmpeqfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpeq(vector double __a, vector double __b) {
-  return (vector bool long long)__builtin_vsx_xvcmpeqdp(__a, __b);
-}
-#endif
-
-#ifdef __POWER9_VECTOR__
-/* vec_cmpne */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpne(vector bool char __a, vector bool char __b) {
-  return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a,
-                                                     (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpne(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a,
-                                                     (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpne(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a,
-                                                     (vector char)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpne(vector bool short __a, vector bool short __b) {
-  return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a,
-                                                      (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpne(vector signed short __a, vector signed short __b) {
-  return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a,
-                                                      (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpne(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a,
-                                                      (vector short)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector bool int __a, vector bool int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector signed int __a, vector signed int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpne(vector float __a, vector float __b) {
-  return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector double __a, vector double __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
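-/* Note: ISA 3.0 provides vcmpne[b,h,w] but no doubleword form, so the
-   64-bit and double overloads above synthesize "not equal" by complementing
-   vcmpequd. */
-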
-/* vec_cmpnez */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpnez(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpnezb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpnez(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpnezb((vector char)__a,
-                                                      (vector char)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpnez(vector signed short __a, vector signed short __b) {
-  return (vector bool short)__builtin_altivec_vcmpnezh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpnez(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpnezh((vector short)__a,
-                                                       (vector short)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpnez(vector signed int __a, vector signed int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnezw((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpnez(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpnezw((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ signed int __ATTRS_o_ai
-vec_cntlz_lsbb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vctzlsbb(__a);
-#else
-  return __builtin_altivec_vclzlsbb(__a);
-#endif
-}
-
-static __inline__ signed int __ATTRS_o_ai
-vec_cntlz_lsbb(vector unsigned char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vctzlsbb(__a);
-#else
-  return __builtin_altivec_vclzlsbb(__a);
-#endif
-}
-
-static __inline__ signed int __ATTRS_o_ai
-vec_cnttz_lsbb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclzlsbb(__a);
-#else
-  return __builtin_altivec_vctzlsbb(__a);
-#endif
-}
-
-static __inline__ signed int __ATTRS_o_ai
-vec_cnttz_lsbb(vector unsigned char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclzlsbb(__a);
-#else
-  return __builtin_altivec_vctzlsbb(__a);
-#endif
-}
-
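-/* Note: vclzlsbb/vctzlsbb count from vector register byte 0; on little-endian
-   targets element order is reversed relative to register order, so the
-   leading/trailing builtins are swapped above to preserve element-order
-   semantics. */
-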
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_parity_lsbb(vector unsigned int __a) {
-  return __builtin_altivec_vprtybw(__a);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_parity_lsbb(vector signed int __a) {
-  return __builtin_altivec_vprtybw(__a);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_parity_lsbb(vector unsigned __int128 __a) {
-  return __builtin_altivec_vprtybq(__a);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_parity_lsbb(vector signed __int128 __a) {
-  return __builtin_altivec_vprtybq(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_parity_lsbb(vector unsigned long long __a) {
-  return __builtin_altivec_vprtybd(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_parity_lsbb(vector signed long long __a) {
-  return __builtin_altivec_vprtybd(__a);
-}
-
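-/* Note: vprtyb[w,d,q] set each element to the XOR (parity) of the
-   least-significant bit of every byte it contains. */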
-#endif
-
-/* vec_cmpgt */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpgt(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpgt(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_cmpgt(vector short __a,
-                                                           vector short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpgt(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector int __a,
-                                                         vector int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpgt(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpgtsd(__a, __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)__builtin_altivec_vcmpgtud(__a, __b);
-}
-#endif
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector float __a,
-                                                         vector float __b) {
-#ifdef __VSX__
-  return (vector bool int)__builtin_vsx_xvcmpgtsp(__a, __b);
-#else
-  return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpgt(vector double __a, vector double __b) {
-  return (vector bool long long)__builtin_vsx_xvcmpgtdp(__a, __b);
-}
-#endif
-
-/* vec_cmpge */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpge(vector signed char __a, vector signed char __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmpge(vector unsigned char __a, vector unsigned char __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpge(vector signed short __a, vector signed short __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmpge(vector unsigned short __a, vector unsigned short __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpge(vector signed int __a, vector signed int __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmpge(vector unsigned int __a, vector unsigned int __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmpge(vector float __a,
-                                                         vector float __b) {
-#ifdef __VSX__
-  return (vector bool int)__builtin_vsx_xvcmpgesp(__a, __b);
-#else
-  return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpge(vector double __a, vector double __b) {
-  return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b);
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpge(vector signed long long __a, vector signed long long __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(vec_cmpgt(__b, __a));
-}
-#endif
-
-/* vec_vcmpgefp */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgefp(vector float __a, vector float __b) {
-  return (vector bool int)__builtin_altivec_vcmpgefp(__a, __b);
-}
-
-/* vec_vcmpgtsb */
-
-static __inline__ vector bool char __attribute__((__always_inline__))
-vec_vcmpgtsb(vector signed char __a, vector signed char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtsb(__a, __b);
-}
-
-/* vec_vcmpgtub */
-
-static __inline__ vector bool char __attribute__((__always_inline__))
-vec_vcmpgtub(vector unsigned char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vcmpgtub(__a, __b);
-}
-
-/* vec_vcmpgtsh */
-
-static __inline__ vector bool short __attribute__((__always_inline__))
-vec_vcmpgtsh(vector short __a, vector short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtsh(__a, __b);
-}
-
-/* vec_vcmpgtuh */
-
-static __inline__ vector bool short __attribute__((__always_inline__))
-vec_vcmpgtuh(vector unsigned short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vcmpgtuh(__a, __b);
-}
-
-/* vec_vcmpgtsw */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgtsw(vector int __a, vector int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtsw(__a, __b);
-}
-
-/* vec_vcmpgtuw */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgtuw(vector unsigned int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtuw(__a, __b);
-}
-
-/* vec_vcmpgtfp */
-
-static __inline__ vector bool int __attribute__((__always_inline__))
-vec_vcmpgtfp(vector float __a, vector float __b) {
-  return (vector bool int)__builtin_altivec_vcmpgtfp(__a, __b);
-}
-
-/* vec_cmple */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmple(vector signed char __a, vector signed char __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmple(vector unsigned char __a, vector unsigned char __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmple(vector signed short __a, vector signed short __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmple(vector unsigned short __a, vector unsigned short __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmple(vector signed int __a, vector signed int __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmple(vector unsigned int __a, vector unsigned int __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmple(vector float __a,
-                                                         vector float __b) {
-  return vec_cmpge(__b, __a);
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmple(vector double __a, vector double __b) {
-  return vec_cmpge(__b, __a);
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmple(vector signed long long __a, vector signed long long __b) {
-  return vec_cmpge(__b, __a);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_cmpge(__b, __a);
-}
-#endif
-
-/* vec_cmplt */
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmplt(vector signed char __a, vector signed char __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_cmplt(vector unsigned char __a, vector unsigned char __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_cmplt(vector short __a,
-                                                           vector short __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_cmplt(vector unsigned short __a, vector unsigned short __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector int __a,
-                                                         vector int __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_cmplt(vector unsigned int __a, vector unsigned int __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_cmplt(vector float __a,
-                                                         vector float __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmplt(vector double __a, vector double __b) {
-  return vec_cmpgt(__b, __a);
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmplt(vector signed long long __a, vector signed long long __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_cmpgt(__b, __a);
-}
-
-/* vec_popcnt */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_popcnt(vector signed char __a) {
-  return __builtin_altivec_vpopcntb(__a);
-}
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_popcnt(vector unsigned char __a) {
-  return __builtin_altivec_vpopcntb(__a);
-}
-static __inline__ vector signed short __ATTRS_o_ai
-vec_popcnt(vector signed short __a) {
-  return __builtin_altivec_vpopcnth(__a);
-}
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_popcnt(vector unsigned short __a) {
-  return __builtin_altivec_vpopcnth(__a);
-}
-static __inline__ vector signed int __ATTRS_o_ai
-vec_popcnt(vector signed int __a) {
-  return __builtin_altivec_vpopcntw(__a);
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_popcnt(vector unsigned int __a) {
-  return __builtin_altivec_vpopcntw(__a);
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_popcnt(vector signed long long __a) {
-  return __builtin_altivec_vpopcntd(__a);
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_popcnt(vector unsigned long long __a) {
-  return __builtin_altivec_vpopcntd(__a);
-}
-
-/* vec_cntlz */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_cntlz(vector signed char __a) {
-  return __builtin_altivec_vclzb(__a);
-}
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_cntlz(vector unsigned char __a) {
-  return __builtin_altivec_vclzb(__a);
-}
-static __inline__ vector signed short __ATTRS_o_ai
-vec_cntlz(vector signed short __a) {
-  return __builtin_altivec_vclzh(__a);
-}
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_cntlz(vector unsigned short __a) {
-  return __builtin_altivec_vclzh(__a);
-}
-static __inline__ vector signed int __ATTRS_o_ai
-vec_cntlz(vector signed int __a) {
-  return __builtin_altivec_vclzw(__a);
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_cntlz(vector unsigned int __a) {
-  return __builtin_altivec_vclzw(__a);
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_cntlz(vector signed long long __a) {
-  return __builtin_altivec_vclzd(__a);
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_cntlz(vector unsigned long long __a) {
-  return __builtin_altivec_vclzd(__a);
-}
-#endif
-
-#ifdef __POWER9_VECTOR__
-
-/* vec_cnttz */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_cnttz(vector signed char __a) {
-  return __builtin_altivec_vctzb(__a);
-}
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_cnttz(vector unsigned char __a) {
-  return __builtin_altivec_vctzb(__a);
-}
-static __inline__ vector signed short __ATTRS_o_ai
-vec_cnttz(vector signed short __a) {
-  return __builtin_altivec_vctzh(__a);
-}
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_cnttz(vector unsigned short __a) {
-  return __builtin_altivec_vctzh(__a);
-}
-static __inline__ vector signed int __ATTRS_o_ai
-vec_cnttz(vector signed int __a) {
-  return __builtin_altivec_vctzw(__a);
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_cnttz(vector unsigned int __a) {
-  return __builtin_altivec_vctzw(__a);
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_cnttz(vector signed long long __a) {
-  return __builtin_altivec_vctzd(__a);
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_cnttz(vector unsigned long long __a) {
-  return __builtin_altivec_vctzd(__a);
-}
-
-/* vec_first_match_index */
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector signed char __a, vector signed char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
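-/* How the index is computed: vec_cmpeq yields all-ones in matching elements;
-   counting trailing (LE) or leading (BE) zero bits of that mask, one
-   doubleword at a time, gives the bit offset of the first match, and the
-   shift (3, 4, or 5) converts bits to an element index for 8-, 16-, or
-   32-bit elements. __res[0] == 64 means no match in the first doubleword,
-   so the search continues into the second. */
-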
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector unsigned char __a, vector unsigned char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector signed short __a, vector signed short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector unsigned short __a, vector unsigned short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector signed int __a, vector signed int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_index(vector unsigned int __a, vector unsigned int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-/* vec_first_match_or_eos_index */
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector signed char __a, vector signed char __b) {
-  /* OR the equality mask with comparisons of the mask against each input.
-     Where the inputs differ the mask is zero, so comparing it with an input
-     flags any zero (end-of-string) element; the combined mask is therefore
-     set where the elements are equal or either element is zero. */
-  vector bool char __tmp1 = vec_cmpeq(__a, __b);
-  vector bool char __tmp2 = __tmp1 |
-                            vec_cmpeq((vector signed char)__tmp1, __a) |
-                            vec_cmpeq((vector signed char)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector unsigned char __a,
-                             vector unsigned char __b) {
-  vector bool char __tmp1 = vec_cmpeq(__a, __b);
-  vector bool char __tmp2 = __tmp1 |
-                            vec_cmpeq((vector unsigned char)__tmp1, __a) |
-                            vec_cmpeq((vector unsigned char)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector signed short __a, vector signed short __b) {
-  vector bool short __tmp1 = vec_cmpeq(__a, __b);
-  vector bool short __tmp2 = __tmp1 |
-                             vec_cmpeq((vector signed short)__tmp1, __a) |
-                             vec_cmpeq((vector signed short)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector unsigned short __a,
-                             vector unsigned short __b) {
-  vector bool short __tmp1 = vec_cmpeq(__a, __b);
-  vector bool short __tmp2 = __tmp1 |
-                             vec_cmpeq((vector unsigned short)__tmp1, __a) |
-                             vec_cmpeq((vector unsigned short)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector signed int __a, vector signed int __b) {
-  vector bool int __tmp1 = vec_cmpeq(__a, __b);
-  vector bool int __tmp2 = __tmp1 | vec_cmpeq((vector signed int)__tmp1, __a) |
-                           vec_cmpeq((vector signed int)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-      vec_cnttz((vector unsigned long long)__tmp2);
-#else
-      vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_match_or_eos_index(vector unsigned int __a, vector unsigned int __b) {
-  vector bool int __tmp1 = vec_cmpeq(__a, __b);
-  vector bool int __tmp2 = __tmp1 |
-                           vec_cmpeq((vector unsigned int)__tmp1, __a) |
-                           vec_cmpeq((vector unsigned int)__tmp1, __b);
-
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)__tmp2);
-#else
-    vec_cntlz((vector unsigned long long)__tmp2);
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-/* vec_first_mismatch_index */
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector signed char __a, vector signed char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector unsigned char __a, vector unsigned char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector signed short __a, vector signed short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector unsigned short __a, vector unsigned short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector signed int __a, vector signed int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_index(vector unsigned int __a, vector unsigned int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-/* vec_first_mismatch_or_eos_index */
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector signed char __a,
-                                vector signed char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector unsigned char __a,
-                                vector unsigned char __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 3;
-  }
-  return __res[0] >> 3;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector signed short __a,
-                                vector signed short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector unsigned short __a,
-                                vector unsigned short __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 4;
-  }
-  return __res[0] >> 4;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector signed int __a, vector signed int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-static __inline__ unsigned __ATTRS_o_ai
-vec_first_mismatch_or_eos_index(vector unsigned int __a,
-                                vector unsigned int __b) {
-  vector unsigned long long __res =
-#ifdef __LITTLE_ENDIAN__
-    vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
-#else
-    vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
-#endif
-  if (__res[0] == 64) {
-    return (__res[1] + 64) >> 5;
-  }
-  return __res[0] >> 5;
-}
-
-/* vec_insert_exp */
-
-static __inline__ vector double __ATTRS_o_ai
-vec_insert_exp(vector double __a, vector unsigned long long __b) {
-  return __builtin_vsx_xviexpdp((vector unsigned long long)__a, __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_insert_exp(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_vsx_xviexpdp(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_insert_exp(vector float __a, vector unsigned int __b) {
-  return __builtin_vsx_xviexpsp((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_insert_exp(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_vsx_xviexpsp(__a, __b);
-}
-
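-/* Note: xviexpdp/xviexpsp keep the sign and significand of the first operand
-   and take the biased exponent field from the second operand. */
-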
-#if defined(__powerpc64__)
-static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(signed char *__a,
-                                                             size_t __b) {
-  return (vector signed char)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_len(unsigned char *__a, size_t __b) {
-  return (vector unsigned char)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(signed short *__a,
-                                                              size_t __b) {
-  return (vector signed short)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xl_len(unsigned short *__a, size_t __b) {
-  return (vector unsigned short)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(signed int *__a,
-                                                            size_t __b) {
-  return (vector signed int)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(unsigned int *__a,
-                                                              size_t __b) {
-  return (vector unsigned int)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xl_len(float *__a, size_t __b) {
-  return (vector float)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_xl_len(signed __int128 *__a, size_t __b) {
-  return (vector signed __int128)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_len(unsigned __int128 *__a, size_t __b) {
-  return (vector unsigned __int128)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xl_len(signed long long *__a, size_t __b) {
-  return (vector signed long long)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xl_len(unsigned long long *__a, size_t __b) {
-  return (vector unsigned long long)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_xl_len(double *__a,
-                                                        size_t __b) {
-  return (vector double)__builtin_vsx_lxvl(__a, (__b << 56));
-}
-
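-/* Note: lxvl/stxvl take the byte count in the most-significant byte of the
-   length register, hence the (__b << 56) in every variant here. */
-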
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_len_r(unsigned char *__a, size_t __b) {
-  vector unsigned char __res =
-      (vector unsigned char)__builtin_vsx_lxvll(__a, (__b << 56));
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __mask =
-      (vector unsigned char)__builtin_altivec_lvsr(16 - __b, (int *)NULL);
-  __res = (vector unsigned char)__builtin_altivec_vperm_4si(
-      (vector int)__res, (vector int)__res, __mask);
-#endif
-  return __res;
-}
-
-/* vec_xst_len */
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned char __a,
-                                                unsigned char *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed char __a,
-                                                signed char *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed short __a,
-                                                signed short *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned short __a,
-                                                unsigned short *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed int __a,
-                                                signed int *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned int __a,
-                                                unsigned int *__b, size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector float __a, float *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed __int128 __a,
-                                                signed __int128 *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned __int128 __a,
-                                                unsigned __int128 *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed long long __a,
-                                                signed long long *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned long long __a,
-                                                unsigned long long *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len(vector double __a, double *__b,
-                                                size_t __c) {
-  return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_len_r(vector unsigned char __a,
-                                                  unsigned char *__b,
-                                                  size_t __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __mask =
-      (vector unsigned char)__builtin_altivec_lvsl(16 - __c, (int *)NULL);
-  vector unsigned char __res =
-      __builtin_altivec_vperm_4si((vector int)__a, (vector int)__a, __mask);
-  return __builtin_vsx_stxvll((vector int)__res, __b, (__c << 56));
-#else
-  return __builtin_vsx_stxvll((vector int)__a, __b, (__c << 56));
-#endif
-}
-#endif
-#endif
-
-/* vec_cpsgn */
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai vec_cpsgn(vector float __a,
-                                                      vector float __b) {
-  return __builtin_vsx_xvcpsgnsp(__a, __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
-                                                       vector double __b) {
-  return __builtin_vsx_xvcpsgndp(__a, __b);
-}
-#endif
-
-/* vec_ctf */
-
-#ifdef __VSX__
-#define vec_ctf(__a, __b)                                                      \
-  _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),  \
-             vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
-                                                   (__b)),                     \
-             vector unsigned long long                                         \
-           : (__builtin_convertvector((vector unsigned long long)(__a),        \
-                                      vector double) *                         \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)),              \
-             vector signed long long                                           \
-           : (__builtin_convertvector((vector signed long long)(__a),          \
-                                      vector double) *                         \
-              (vector double)(vector unsigned long long)((0x3ffULL - (__b))    \
-                                                         << 52)))
-#else
-#define vec_ctf(__a, __b)                                                      \
-  _Generic((__a), vector int                                                   \
-           : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)),  \
-             vector unsigned int                                               \
-           : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
-                                                   (__b)))
-#endif
-
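-/* How the VSX vec_ctf scaling works: (0x3ffULL - __b) << 52 is the bit
-   pattern of an IEEE-754 double with a zero mantissa and a biased exponent
-   of 1023 - __b, i.e. the value 2**-__b, so the multiply rescales the
-   converted result exactly as the __b fractional-bits argument requires
-   (e.g. __b = 1 gives 0x3FE0000000000000 = 0.5). */
-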
-/* vec_vcfux */
-
-#define vec_vcfux __builtin_altivec_vcfux
-
-/* vec_vcfsx */
-
-#define vec_vcfsx(__a, __b) __builtin_altivec_vcfsx((vector int)(__a), (__b))
-
-/* vec_cts */
-
-#ifdef __VSX__
-#define vec_cts(__a, __b)                                                      \
-  _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctsxs((vector float)(__a), (__b)),             \
-             vector double                                                     \
-           : __extension__({                                                   \
-             vector double __ret =                                             \
-                 (vector double)(__a) *                                        \
-                 (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
-                                                            << 52);            \
-             __builtin_convertvector(__ret, vector signed long long);          \
-           }))
-#else
-#define vec_cts __builtin_altivec_vctsxs
-#endif
-
-/* vec_vctsxs */
-
-#define vec_vctsxs __builtin_altivec_vctsxs
-
-/* vec_ctu */
-
-#ifdef __VSX__
-#define vec_ctu(__a, __b)                                                      \
-  _Generic((__a), vector float                                                 \
-           : __builtin_altivec_vctuxs((vector float)(__a), (__b)),             \
-             vector double                                                     \
-           : __extension__({                                                   \
-             vector double __ret =                                             \
-                 (vector double)(__a) *                                        \
-                 (vector double)(vector unsigned long long)((0x3ffULL + __b)   \
-                                                            << 52);            \
-             __builtin_convertvector(__ret, vector unsigned long long);        \
-           }))
-#else
-#define vec_ctu __builtin_altivec_vctuxs
-#endif
-
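-/* vec_cts/vec_ctu use the converse trick: (0x3ffULL + __b) << 52 encodes
-   2**__b, pre-scaling the double before the float-to-integer convert. */
-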
-/* vec_vctuxs */
-
-#define vec_vctuxs __builtin_altivec_vctuxs
-
-/* vec_signed */
-
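-/* Forward declaration: vec_sld is defined later in this header but is needed
-   here and in the vec_unsigned/vec_float sections below. */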
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sld(vector signed int, vector signed int, unsigned const int __c);
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_signed(vector float __a) {
-  return __builtin_convertvector(__a, vector signed int);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_signed(vector double __a) {
-  return __builtin_convertvector(__a, vector signed long long);
-}
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_signed2(vector double __a, vector double __b) {
-  return (vector signed int) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_signede(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvdpsxws(__a);
-#endif
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_signedo(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvdpsxws(__a);
-#else
-  vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-#endif
-
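-/* Note: the vec_sld rotation compensates for xvcvdpsxws depositing its two
-   results in register word order, which maps to different element indices on
-   big- and little-endian targets; the same pattern recurs in vec_unsignede/o,
-   vec_floate/o, and vec_doublee/o below. */
-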
-/* vec_unsigned */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sld(vector unsigned int, vector unsigned int, unsigned const int __c);
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unsigned(vector float __a) {
-  return __builtin_convertvector(__a, vector unsigned int);
-}
-
-#ifdef __VSX__
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_unsigned(vector double __a) {
-  return __builtin_convertvector(__a, vector unsigned long long);
-}
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_unsigned2(vector double __a, vector double __b) {
-  return (vector unsigned int) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unsignede(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvdpuxws(__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unsignedo(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvdpuxws(__a);
-#else
-  vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-#endif
-
-/* vec_float */
-
-static __inline__ vector float __ATTRS_o_ai
-vec_sld(vector float, vector float, unsigned const int __c);
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float(vector signed int __a) {
-  return __builtin_convertvector(__a, vector float);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float(vector unsigned int __a) {
-  return __builtin_convertvector(__a, vector float);
-}
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai
-vec_float2(vector signed long long __a, vector signed long long __b) {
-  return (vector float) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float2(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector float) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_float2(vector double __a, vector double __b) {
-  return (vector float) { __a[0], __a[1], __b[0], __b[1] };
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floate(vector signed long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector float __ret = __builtin_vsx_xvcvsxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvsxdsp(__a);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floate(vector unsigned long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector float __ret = __builtin_vsx_xvcvuxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvuxdsp(__a);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floate(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  vector float __ret = __builtin_vsx_xvcvdpsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#else
-  return __builtin_vsx_xvcvdpsp(__a);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floato(vector signed long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvsxdsp(__a);
-#else
-  vector float __ret = __builtin_vsx_xvcvsxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floato(vector unsigned long long __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvuxdsp(__a);
-#else
-  vector float __ret = __builtin_vsx_xvcvuxdsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_floato(vector double __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvdpsp(__a);
-#else
-  vector float __ret = __builtin_vsx_xvcvdpsp(__a);
-  return vec_sld(__ret, __ret, 12);
-#endif
-}
-#endif
-
-/* vec_double */
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai
-vec_double(vector signed long long __a) {
-  return __builtin_convertvector(__a, vector double);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_double(vector unsigned long long __a) {
-  return __builtin_convertvector(__a, vector double);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublee(vector signed int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4));
-#else
-  return __builtin_vsx_xvcvsxwdp(__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublee(vector unsigned int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4));
-#else
-  return __builtin_vsx_xvcvuxwdp(__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublee(vector float __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4));
-#else
-  return __builtin_vsx_xvcvspdp(__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleh(vector signed int __a) {
-  vector double __ret = {__a[0], __a[1]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleh(vector unsigned int __a) {
-  vector double __ret = {__a[0], __a[1]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleh(vector float __a) {
-  vector double __ret = {__a[0], __a[1]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublel(vector signed int __a) {
-  vector double __ret = {__a[2], __a[3]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublel(vector unsigned int __a) {
-  vector double __ret = {__a[2], __a[3]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doublel(vector float __a) {
-  vector double __ret = {__a[2], __a[3]};
-  return __ret;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleo(vector signed int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvsxwdp(__a);
-#else
-  return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4));
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleo(vector unsigned int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvuxwdp(__a);
-#else
-  return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4));
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_doubleo(vector float __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_vsx_xvcvspdp(__a);
-#else
-  return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4));
-#endif
-}
-#endif
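-
-/* Naming convention for the vec_float / vec_double conversion helpers
-   above: a trailing 'e' or 'o' converts the even- or odd-indexed source
-   elements, while 'h' and 'l' convert the high (low-indexed) or low half
-   of the source vector. For example, given
-   vector float __vf = {1.5f, 2.5f, 3.5f, 4.5f}:
-     vec_doubleh(__vf) -> {1.5, 2.5}    vec_doublel(__vf) -> {3.5, 4.5}
-     vec_doublee(__vf) -> {1.5, 3.5}    vec_doubleo(__vf) -> {2.5, 4.5} */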
-
-/* vec_div */
-
-/* Integer vector divides: each vector is scalarized, the elements are
-   divided, and the results are reassembled into a vector (see the usage
-   sketch after these overloads).
-*/
-static __inline__ vector signed char __ATTRS_o_ai
-vec_div(vector signed char __a, vector signed char __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_div(vector unsigned char __a, vector unsigned char __b) {
-  return __a / __b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_div(vector signed short __a, vector signed short __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_div(vector unsigned short __a, vector unsigned short __b) {
-  return __a / __b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_div(vector signed int __a, vector signed int __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_div(vector unsigned int __a, vector unsigned int __b) {
-  return __a / __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_div(vector signed long long __a, vector signed long long __b) {
-  return __a / __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_div(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a / __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_div(vector float __a,
-                                                    vector float __b) {
-  return __a / __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_div(vector double __a,
-                                                     vector double __b) {
-  return __a / __b;
-}
-#endif
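-
-/* Usage sketch for the vec_div overloads above: division happens element
-   by element, so the integer forms lower to per-lane scalar divides.
-   Hypothetical helper, illustration only: */
-static __inline__ vector signed int __ATTRS_o_ai
-__vec_div_sketch(void) {
-  vector signed int __a = {10, 20, 30, 40};
-  vector signed int __b = {2, 5, 3, 8};
-  return vec_div(__a, __b); /* yields {5, 4, 10, 5} */
-}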
-
-/* vec_dss */
-
-#define vec_dss __builtin_altivec_dss
-
-/* vec_dssall */
-
-static __inline__ void __attribute__((__always_inline__)) vec_dssall(void) {
-  __builtin_altivec_dssall();
-}
-
-/* vec_dst */
-#define vec_dst(__PTR, __CW, __STR) \
-  __extension__(                    \
-      { __builtin_altivec_dst((const void *)(__PTR), (__CW), (__STR)); })
-
-/* vec_dstst */
-#define vec_dstst(__PTR, __CW, __STR) \
-  __extension__(                      \
-      { __builtin_altivec_dstst((const void *)(__PTR), (__CW), (__STR)); })
-
-/* vec_dststt */
-#define vec_dststt(__PTR, __CW, __STR) \
-  __extension__(                       \
-      { __builtin_altivec_dststt((const void *)(__PTR), (__CW), (__STR)); })
-
-/* vec_dstt */
-#define vec_dstt(__PTR, __CW, __STR) \
-  __extension__(                     \
-      { __builtin_altivec_dstt((const void *)(__PTR), (__CW), (__STR)); })
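-
-/* The vec_dst* macros above start a software-directed prefetch stream:
-   __PTR is the stream start, __STR a stream id (0-3), and __CW a control
-   word packing block size (in 16-byte units), block count, and signed
-   byte stride. The helper below is hypothetical (not part of this header)
-   and sketches the conventional field layout; verify the bit positions
-   against the AltiVec PEM before relying on them. */
-#define __VEC_DST_CTRL(__size, __count, __stride)                           \
-  ((((__size)&0x1F) << 24) | (((__count)&0xFF) << 16) | ((__stride)&0xFFFF))
-/* e.g. touch 4 blocks of 2 vectors each, 64 bytes apart, on stream 0:
-   vec_dst(__ptr, __VEC_DST_CTRL(2, 4, 64), 0); */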
-
-/* vec_eqv */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_eqv(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                  (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_eqv(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                    (vector unsigned int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_eqv(vector bool char __a,
-                                                        vector bool char __b) {
-  return (vector bool char)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                (vector unsigned int)__b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_eqv(vector signed short __a, vector signed short __b) {
-  return (vector signed short)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                   (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_eqv(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                     (vector unsigned int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_eqv(vector bool short __a, vector bool short __b) {
-  return (vector bool short)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                 (vector unsigned int)__b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_eqv(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                 (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_eqv(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_vsx_xxleqv(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_eqv(vector bool int __a,
-                                                       vector bool int __b) {
-  return (vector bool int)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                               (vector unsigned int)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_eqv(vector signed long long __a, vector signed long long __b) {
-  return (vector signed long long)__builtin_vsx_xxleqv(
-      (vector unsigned int)__a, (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_eqv(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__builtin_vsx_xxleqv(
-      (vector unsigned int)__a, (vector unsigned int)__b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_eqv(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                                     (vector unsigned int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_eqv(vector float __a,
-                                                    vector float __b) {
-  return (vector float)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                            (vector unsigned int)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_eqv(vector double __a,
-                                                     vector double __b) {
-  return (vector double)__builtin_vsx_xxleqv((vector unsigned int)__a,
-                                             (vector unsigned int)__b);
-}
-#endif
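-
-/* vec_eqv is the bitwise equivalence (XNOR): every result bit equals
-   ~(__a ^ __b). Hypothetical helper showing the identity: */
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned int __ATTRS_o_ai
-__vec_eqv_as_xnor(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a ^ __b); /* same bits as vec_eqv(__a, __b) */
-}
-#endif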
-
-/* vec_expte */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_expte(vector float __a) {
-  return __builtin_altivec_vexptefp(__a);
-}
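-
-/* Note: despite the name, vec_expte computes a per-element *estimate* of
-   2 raised to the element's value (base 2, not e); vexptefp is an
-   estimate instruction, not an exact exp2. */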
-
-/* vec_vexptefp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vexptefp(vector float __a) {
-  return __builtin_altivec_vexptefp(__a);
-}
-
-/* vec_floor */
-
-static __inline__ vector float __ATTRS_o_ai vec_floor(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrspim(__a);
-#else
-  return __builtin_altivec_vrfim(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_floor(vector double __a) {
-  return __builtin_vsx_xvrdpim(__a);
-}
-#endif
-
-/* vec_vrfim */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfim(vector float __a) {
-  return __builtin_altivec_vrfim(__a);
-}
-
-/* vec_ld */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ld(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ld(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ld(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ld(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_ld(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ld(int __a,
-                                                   const vector short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ld(int __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ld(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ld(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_ld(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_ld(int __a,
-                                                   const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ld(int __a,
-                                                 const vector int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ld(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ld(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ld(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_ld(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ld(int __a,
-                                                   const vector float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ld(int __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
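-
-/* The vec_ld overloads above load 16 bytes from the effective address
-   (__b + __a) with its low four bits cleared: lvx is always a
-   16-byte-aligned load, and misaligned addresses round down rather than
-   fault. Hypothetical helper, aligned input assumed: */
-static __inline__ vector signed int __ATTRS_o_ai
-__vec_ld_next_quadword(const signed int *__p) {
-  /* loads the 16-byte block that starts 16 bytes past __p */
-  return vec_ld(16, __p);
-}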
-
-/* vec_lvx */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvx(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvx(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvx(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvx(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvx(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvx(int __a,
-                                                    const vector short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvx(int __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvx(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvx(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvx(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvx(int __a,
-                                                    const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvx(int __a,
-                                                  const vector int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvx(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvx(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvx(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvx(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvx(int __a,
-                                                    const vector float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvx(int __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvx(__a, __b);
-}
-
-/* vec_lde */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lde(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvebx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lde(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lde(int __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvehx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lde(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lde(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lde(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lde(int __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvewx(__a, __b);
-}
-
-/* vec_lvebx */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvebx(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvebx(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvebx(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
-}
-
-/* vec_lvehx */
-
-static __inline__ vector short __ATTRS_o_ai vec_lvehx(int __a,
-                                                      const short *__b) {
-  return (vector short)__builtin_altivec_lvehx(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvehx(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
-}
-
-/* vec_lvewx */
-
-static __inline__ vector int __ATTRS_o_ai vec_lvewx(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvewx(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvewx(int __a,
-                                                      const float *__b) {
-  return (vector float)__builtin_altivec_lvewx(__a, __b);
-}
-
-/* vec_ldl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ldl(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_ldl(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ldl(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ldl(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_ldl(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ldl(int __a,
-                                                    const vector short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_ldl(int __a, const short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ldl(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ldl(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_ldl(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_ldl(int __a,
-                                                    const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ldl(int __a,
-                                                  const vector int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_ldl(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ldl(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ldl(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_ldl(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ldl(int __a,
-                                                    const vector float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_ldl(int __a, const float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
-/* vec_lvxl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvxl(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvxl(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvxl(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvxl(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvxl(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvxl(int __a,
-                                                     const vector short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvxl(int __a,
-                                                     const short *__b) {
-  return (vector short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvxl(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvxl(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvxl(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvxl(int __a,
-                                                     const vector pixel *__b) {
-  return (vector pixel)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvxl(int __a,
-                                                   const vector int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvxl(int __a, const int *__b) {
-  return (vector int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvxl(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvxl(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvxl(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvxl(int __a,
-                                                     const vector float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvxl(int __a,
-                                                     const float *__b) {
-  return (vector float)__builtin_altivec_lvxl(__a, __b);
-}
-
-/* vec_loge */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_loge(vector float __a) {
-  return __builtin_altivec_vlogefp(__a);
-}
-
-/* vec_vlogefp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vlogefp(vector float __a) {
-  return __builtin_altivec_vlogefp(__a);
-}
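-
-/* Note: vec_loge computes a per-element *estimate* of log2 of the element
-   (base 2, not a natural log, despite the name); vlogefp is an estimate
-   instruction with limited precision. */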
-
-/* vec_lvsl */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const signed char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const signed char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const unsigned char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                             const short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const unsigned short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const unsigned short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                             const int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const unsigned int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsl(int __a, const unsigned int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsl(int __a, const float *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsl(int __a,
-                                                             const float *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsl(__a, __b);
-}
-#endif
-
-/* vec_lvsr */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const signed char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const signed char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const unsigned char *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                             const short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const unsigned short *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const unsigned short *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                             const int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const unsigned int *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvsr(int __a, const unsigned int *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-    __attribute__((__deprecated__("use assignment for unaligned little endian \
-loads/stores"))) vec_lvsr(int __a, const float *__b) {
-  vector unsigned char mask =
-      (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-  vector unsigned char reverse = {15, 14, 13, 12, 11, 10, 9, 8,
-                                  7,  6,  5,  4,  3,  2,  1, 0};
-  return vec_perm(mask, mask, reverse);
-}
-#else
-static __inline__ vector unsigned char __ATTRS_o_ai vec_lvsr(int __a,
-                                                             const float *__b) {
-  return (vector unsigned char)__builtin_altivec_lvsr(__a, __b);
-}
-#endif
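-
-/* Rationale for the deprecation messages above: on big-endian targets an
-   unaligned vector load is classically built from two aligned lvx loads
-   and an lvsl-generated permute, whereas little-endian targets should use
-   a plain pointer dereference/assignment and let the compiler handle the
-   misalignment. A sketch of the classic big-endian idiom (hypothetical
-   helper): */
-#ifndef __LITTLE_ENDIAN__
-static __inline__ vector unsigned char __ATTRS_o_ai
-__vec_load_unaligned(const unsigned char *__p) {
-  vector unsigned char __lo = vec_ld(0, __p);   /* block containing __p */
-  vector unsigned char __hi = vec_ld(15, __p);  /* next 16-byte block */
-  vector unsigned char __pm = vec_lvsl(0, __p); /* alignment shuffle mask */
-  return vec_perm(__lo, __hi, __pm);
-}
-#endif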
-
-/* vec_madd */
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mladd(vector signed short, vector signed short, vector signed short);
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mladd(vector signed short, vector unsigned short, vector unsigned short);
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mladd(vector unsigned short, vector signed short, vector signed short);
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mladd(vector unsigned short, vector unsigned short, vector unsigned short);
-
-static __inline__ vector signed short __ATTRS_o_ai vec_madd(
-    vector signed short __a, vector signed short __b, vector signed short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_madd(vector signed short __a, vector unsigned short __b,
-         vector unsigned short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_madd(vector unsigned short __a, vector signed short __b,
-         vector signed short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_madd(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned short __c) {
-  return vec_mladd(__a, __b, __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_madd(vector float __a,
-                                                     vector float __b,
-                                                     vector float __c) {
-#ifdef __VSX__
-  return __builtin_vsx_xvmaddasp(__a, __b, __c);
-#else
-  return __builtin_altivec_vmaddfp(__a, __b, __c);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_madd(vector double __a,
-                                                      vector double __b,
-                                                      vector double __c) {
-  return __builtin_vsx_xvmaddadp(__a, __b, __c);
-}
-#endif
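-
-/* The vec_madd overloads above compute __a * __b + __c per element: the
-   halfword forms delegate to vec_mladd (low 16 bits of each product plus
-   __c, modulo 2**16), while the float/double forms use a fused
-   multiply-add where available. Hypothetical helper: */
-static __inline__ vector float __ATTRS_o_ai
-__vec_madd_axpy(vector float __x, vector float __y) {
-  const vector float __two = {2.0f, 2.0f, 2.0f, 2.0f};
-  return vec_madd(__two, __x, __y); /* 2*x + y, fused on VSX */
-}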
-
-/* vec_vmaddfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vmaddfp(vector float __a, vector float __b, vector float __c) {
-  return __builtin_altivec_vmaddfp(__a, __b, __c);
-}
-
-/* vec_madds */
-
-static __inline__ vector signed short __attribute__((__always_inline__))
-vec_madds(vector signed short __a, vector signed short __b,
-          vector signed short __c) {
-  return __builtin_altivec_vmhaddshs(__a, __b, __c);
-}
-
-/* vec_vmhaddshs */
-static __inline__ vector signed short __attribute__((__always_inline__))
-vec_vmhaddshs(vector signed short __a, vector signed short __b,
-              vector signed short __c) {
-  return __builtin_altivec_vmhaddshs(__a, __b, __c);
-}
-
-/* vec_msub */
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai vec_msub(vector float __a,
-                                                     vector float __b,
-                                                     vector float __c) {
-  return __builtin_vsx_xvmsubasp(__a, __b, __c);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_msub(vector double __a,
-                                                      vector double __b,
-                                                      vector double __c) {
-  return __builtin_vsx_xvmsubadp(__a, __b, __c);
-}
-#endif
-
-/* vec_max */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_max(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_max(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_max(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_max(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_max(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_max(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vmaxsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_max(vector bool short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vmaxsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_max(vector short __a,
-                                                    vector bool short __b) {
-  return __builtin_altivec_vmaxsh(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_max(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_max(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_max(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vmaxsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_max(vector bool int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vmaxsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_max(vector int __a,
-                                                  vector bool int __b) {
-  return __builtin_altivec_vmaxsw(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_max(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_max(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_max(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_max(vector signed long long __a, vector signed long long __b) {
-  return __builtin_altivec_vmaxsd(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_max(vector bool long long __a, vector signed long long __b) {
-  return __builtin_altivec_vmaxsd((vector signed long long)__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_max(vector signed long long __a, vector bool long long __b) {
-  return __builtin_altivec_vmaxsd(__a, (vector signed long long)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_max(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vmaxud(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_max(vector bool long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vmaxud((vector unsigned long long)__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_max(vector unsigned long long __a, vector bool long long __b) {
-  return __builtin_altivec_vmaxud(__a, (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_max(vector float __a,
-                                                    vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvmaxsp(__a, __b);
-#else
-  return __builtin_altivec_vmaxfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_max(vector double __a,
-                                                     vector double __b) {
-  return __builtin_vsx_xvmaxdp(__a, __b);
-}
-#endif
-
-/* vec_vmaxsb */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmaxsb(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmaxsb(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vmaxsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmaxsb(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxsb(__a, (vector signed char)__b);
-}
-
-/* vec_vmaxub */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmaxub(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmaxub(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vmaxub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmaxub(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vmaxub(__a, (vector unsigned char)__b);
-}
-
-/* vec_vmaxsh */
-
-static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vmaxsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector bool short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vmaxsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vmaxsh(vector short __a,
-                                                       vector bool short __b) {
-  return __builtin_altivec_vmaxsh(__a, (vector short)__b);
-}
-
-/* vec_vmaxuh */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmaxuh(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmaxuh(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vmaxuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmaxuh(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vmaxuh(__a, (vector unsigned short)__b);
-}
-
-/* vec_vmaxsw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vmaxsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector bool int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vmaxsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vmaxsw(vector int __a,
-                                                     vector bool int __b) {
-  return __builtin_altivec_vmaxsw(__a, (vector int)__b);
-}
-
-/* vec_vmaxuw */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmaxuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmaxuw(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vmaxuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmaxuw(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vmaxuw(__a, (vector unsigned int)__b);
-}
-
-/* vec_vmaxfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vmaxfp(vector float __a, vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvmaxsp(__a, __b);
-#else
-  return __builtin_altivec_vmaxfp(__a, __b);
-#endif
-}
-
-/* vec_mergeh */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_mergeh(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_mergeh(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_mergeh(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mergeh(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mergeh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_mergeh(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_mergeh(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mergeh(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergeh(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergeh(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_mergeh(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeh(vector signed long long __a, vector signed long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeh(vector signed long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector signed long long)__b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector signed long long __b) {
-  return vec_perm((vector signed long long)__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeh(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeh(vector unsigned long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector unsigned long long)__b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector unsigned long long __b) {
-  return vec_perm((vector unsigned long long)__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector bool long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_mergeh(vector double __a,
-                                                        vector double __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergeh(vector double __a, vector bool long long __b) {
-  return vec_perm(__a, (vector double)__b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergeh(vector bool long long __a, vector double __b) {
-  return vec_perm((vector double)__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
-                                         0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-#endif
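-
-/* The vec_mergeh overloads above interleave the low-indexed ("high")
-   halves of their operands: the result is {__a[0], __b[0], __a[1],
-   __b[1], ...}. Worked sketch for 32-bit elements (hypothetical helper): */
-static __inline__ vector signed int __ATTRS_o_ai
-__vec_mergeh_sketch(void) {
-  vector signed int __a = {0, 1, 2, 3};
-  vector signed int __b = {4, 5, 6, 7};
-  return vec_mergeh(__a, __b); /* {0, 4, 1, 5} */
-}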
-
-/* vec_vmrghb */
-
-#define __builtin_altivec_vmrghb vec_vmrghb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmrghb(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmrghb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vmrghb(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x10, 0x01, 0x11, 0x02, 0x12,
-                                         0x03, 0x13, 0x04, 0x14, 0x05, 0x15,
-                                         0x06, 0x16, 0x07, 0x17));
-}
-
-/* vec_vmrghh */
-
-#define __builtin_altivec_vmrghh vec_vmrghh
-
-static __inline__ vector short __ATTRS_o_ai vec_vmrghh(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmrghh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vmrghh(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vmrghh(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x10, 0x11, 0x02, 0x03,
-                                         0x12, 0x13, 0x04, 0x05, 0x14, 0x15,
-                                         0x06, 0x07, 0x16, 0x17));
-}
-
-/* vec_vmrghw */
-
-#define __builtin_altivec_vmrghw vec_vmrghw
-
-static __inline__ vector int __ATTRS_o_ai vec_vmrghw(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmrghw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vmrghw(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vmrghw(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x04, 0x05, 0x06, 0x07,
-                                         0x14, 0x15, 0x16, 0x17));
-}
-
-/* vec_mergel */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_mergel(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_mergel(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_mergel(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mergel(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mergel(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_mergel(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_mergel(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mergel(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergel(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergel(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_mergel(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergel(vector signed long long __a, vector signed long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergel(vector signed long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector signed long long)__b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector signed long long __b) {
-  return vec_perm((vector signed long long)__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergel(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergel(vector unsigned long long __a, vector bool long long __b) {
-  return vec_perm(__a, (vector unsigned long long)__b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector unsigned long long __b) {
-  return vec_perm((vector unsigned long long)__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector bool long long __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector double __ATTRS_o_ai vec_mergel(vector double __a,
-                                                        vector double __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergel(vector double __a, vector bool long long __b) {
-  return vec_perm(__a, (vector double)__b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-static __inline__ vector double __ATTRS_o_ai
-vec_mergel(vector bool long long __a, vector double __b) {
-  return vec_perm((vector double)__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
-                                         0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-#endif
-
-/* vec_vmrglb */
-
-#define __builtin_altivec_vmrglb vec_vmrglb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vmrglb(vector signed char __a, vector signed char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vmrglb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vmrglb(vector bool char __a, vector bool char __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A,
-                                         0x0B, 0x1B, 0x0C, 0x1C, 0x0D, 0x1D,
-                                         0x0E, 0x1E, 0x0F, 0x1F));
-}
-
-/* vec_vmrglh */
-
-#define __builtin_altivec_vmrglh vec_vmrglh
-
-static __inline__ vector short __ATTRS_o_ai vec_vmrglh(vector short __a,
-                                                       vector short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmrglh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vmrglh(vector bool short __a, vector bool short __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vmrglh(vector pixel __a,
-                                                       vector pixel __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x1C, 0x1D,
-                                         0x0E, 0x0F, 0x1E, 0x1F));
-}
-
-/* vec_vmrglw */
-
-#define __builtin_altivec_vmrglw vec_vmrglw
-
-static __inline__ vector int __ATTRS_o_ai vec_vmrglw(vector int __a,
-                                                     vector int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vmrglw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vmrglw(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vmrglw(vector float __a,
-                                                       vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19,
-                                         0x1A, 0x1B, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-#ifdef __POWER8_VECTOR__
-/* vec_mergee */
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergee(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_mergee(vector signed int __a, vector signed int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergee(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergee(vector bool long long __a, vector bool long long __b) {
-  return vec_mergeh(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergee(vector signed long long __a, vector signed long long __b) {
-  return vec_mergeh(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergee(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_mergeh(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_mergee(vector float __a, vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
-                                         0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
-                                         0x18, 0x19, 0x1A, 0x1B));
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_mergee(vector double __a, vector double __b) {
-  return vec_mergeh(__a, __b);
-}
-
-/* vec_mergeo */
-
-static __inline__ vector bool int __ATTRS_o_ai vec_mergeo(vector bool int __a,
-                                                          vector bool int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_mergeo(vector signed int __a, vector signed int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mergeo(vector unsigned int __a, vector unsigned int __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_mergeo(vector bool long long __a, vector bool long long __b) {
-  return vec_mergel(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mergeo(vector signed long long __a, vector signed long long __b) {
-  return vec_mergel(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mergeo(vector unsigned long long __a, vector unsigned long long __b) {
-  return vec_mergel(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_mergeo(vector float __a, vector float __b) {
-  return vec_perm(__a, __b,
-                  (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
-                                         0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
-                                         0x1C, 0x1D, 0x1E, 0x1F));
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_mergeo(vector double __a, vector double __b) {
-  return vec_mergel(__a, __b);
-}
-
-#endif
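-
-/* Illustrative note (not part of the original header): vec_mergee keeps the
-   even-indexed elements of each operand and vec_mergeo the odd-indexed
-   ones.  For 4 x 32-bit elements a = {a0,a1,a2,a3}, b = {b0,b1,b2,b3}:
-     vec_mergee(a, b) -> {a0, b0, a2, b2}
-     vec_mergeo(a, b) -> {a1, b1, a3, b3}
-   With only two 64-bit elements per vector these reduce to vec_mergeh and
-   vec_mergel, which is why the long long and double overloads simply
-   forward to them. */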
-
-/* vec_mfvscr */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_mfvscr(void) {
-  return __builtin_altivec_mfvscr();
-}
-
-/* vec_min */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_min(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_min(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_min(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vminsb(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_min(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_min(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_min(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vminsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_min(vector bool short __a,
-                                                    vector short __b) {
-  return __builtin_altivec_vminsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_min(vector short __a,
-                                                    vector bool short __b) {
-  return __builtin_altivec_vminsh(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_min(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_min(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_min(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vminsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_min(vector bool int __a,
-                                                  vector int __b) {
-  return __builtin_altivec_vminsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_min(vector int __a,
-                                                  vector bool int __b) {
-  return __builtin_altivec_vminsw(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_min(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_min(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_min(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_min(vector signed long long __a, vector signed long long __b) {
-  return __builtin_altivec_vminsd(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_min(vector bool long long __a, vector signed long long __b) {
-  return __builtin_altivec_vminsd((vector signed long long)__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_min(vector signed long long __a, vector bool long long __b) {
-  return __builtin_altivec_vminsd(__a, (vector signed long long)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_min(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vminud(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_min(vector bool long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vminud((vector unsigned long long)__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_min(vector unsigned long long __a, vector bool long long __b) {
-  return __builtin_altivec_vminud(__a, (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_min(vector float __a,
-                                                    vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvminsp(__a, __b);
-#else
-  return __builtin_altivec_vminfp(__a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_min(vector double __a,
-                                                     vector double __b) {
-  return __builtin_vsx_xvmindp(__a, __b);
-}
-#endif
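-
-/* Hypothetical usage sketch (vec_clamp is an illustrative helper, not an
-   AltiVec API): because vec_min selects the element-wise minimum, pairing
-   it with vec_max gives a branch-free per-lane clamp. */
-static __inline__ vector float __ATTRS_o_ai
-vec_clamp(vector float __x, vector float __lo, vector float __hi) {
-  return vec_min(vec_max(__x, __lo), __hi);
-}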
-
-/* vec_vminsb */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vminsb(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vminsb(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vminsb((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vminsb(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vminsb(__a, (vector signed char)__b);
-}
-
-/* vec_vminub */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vminub(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vminub(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vminub((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vminub(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vminub(__a, (vector unsigned char)__b);
-}
-
-/* vec_vminsh */
-
-static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vminsh(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector bool short __a,
-                                                       vector short __b) {
-  return __builtin_altivec_vminsh((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vminsh(vector short __a,
-                                                       vector bool short __b) {
-  return __builtin_altivec_vminsh(__a, (vector short)__b);
-}
-
-/* vec_vminuh */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vminuh(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vminuh(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vminuh((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vminuh(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vminuh(__a, (vector unsigned short)__b);
-}
-
-/* vec_vminsw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vminsw(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector bool int __a,
-                                                     vector int __b) {
-  return __builtin_altivec_vminsw((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vminsw(vector int __a,
-                                                     vector bool int __b) {
-  return __builtin_altivec_vminsw(__a, (vector int)__b);
-}
-
-/* vec_vminuw */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vminuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vminuw(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vminuw((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vminuw(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vminuw(__a, (vector unsigned int)__b);
-}
-
-/* vec_vminfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vminfp(vector float __a, vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvminsp(__a, __b);
-#else
-  return __builtin_altivec_vminfp(__a, __b);
-#endif
-}
-
-/* vec_mladd */
-
-#define __builtin_altivec_vmladduhm vec_mladd
-
-static __inline__ vector short __ATTRS_o_ai vec_mladd(vector short __a,
-                                                      vector short __b,
-                                                      vector short __c) {
-  return __a * __b + __c;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mladd(
-    vector short __a, vector unsigned short __b, vector unsigned short __c) {
-  return __a * (vector short)__b + (vector short)__c;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_mladd(vector unsigned short __a,
-                                                      vector short __b,
-                                                      vector short __c) {
-  return (vector short)__a * __b + __c;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mladd(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned short __c) {
-  return __a * __b + __c;
-}
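-
-/* Worked example (a reader aid, not part of the original header): vec_mladd
-   is an element-wise modular multiply-add, so each halfword lane wraps
-   modulo 2^16.  With unsigned short lanes a = 0x4000, b = 4, c = 1:
-     0x4000 * 4 + 1 = 0x10001, truncated to 16 bits -> 0x0001. */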
-
-/* vec_vmladduhm */
-
-static __inline__ vector short __ATTRS_o_ai vec_vmladduhm(vector short __a,
-                                                          vector short __b,
-                                                          vector short __c) {
-  return __a * __b + __c;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vmladduhm(
-    vector short __a, vector unsigned short __b, vector unsigned short __c) {
-  return __a * (vector short)__b + (vector short)__c;
-}
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vmladduhm(vector unsigned short __a, vector short __b, vector short __c) {
-  return (vector short)__a * __b + __c;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vmladduhm(vector unsigned short __a, vector unsigned short __b,
-              vector unsigned short __c) {
-  return __a * __b + __c;
-}
-
-/* vec_mradds */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_mradds(vector short __a, vector short __b, vector short __c) {
-  return __builtin_altivec_vmhraddshs(__a, __b, __c);
-}
-
-/* vec_vmhraddshs */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vmhraddshs(vector short __a, vector short __b, vector short __c) {
-  return __builtin_altivec_vmhraddshs(__a, __b, __c);
-}
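-
-/* Semantics sketch (a reader aid based on the vmhraddshs instruction, not
-   normative text): for each signed halfword lane, vec_mradds computes
-   approximately
-     saturate(((__a * __b + 0x4000) >> 15) + __c)
-   i.e. the rounded high half of the 32-bit product plus __c, with signed
-   16-bit saturation. */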
-
-/* vec_msum */
-
-static __inline__ vector int __ATTRS_o_ai vec_msum(vector signed char __a,
-                                                   vector unsigned char __b,
-                                                   vector int __c) {
-  return __builtin_altivec_vmsummbm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_msum(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned int __c) {
-  return __builtin_altivec_vmsumubm(__a, __b, __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_msum(vector short __a,
-                                                   vector short __b,
-                                                   vector int __c) {
-  return __builtin_altivec_vmsumshm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_msum(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhm(__a, __b, __c);
-}
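-
-/* Worked example (a reader aid, not part of the original header): vec_msum
-   is a widening multiply-sum.  In the unsigned char form, result lane i is
-     __c[i] + __a[4i]*__b[4i] + __a[4i+1]*__b[4i+1]
-            + __a[4i+2]*__b[4i+2] + __a[4i+3]*__b[4i+3]
-   so with __a = __b = {1,1,...,1} and __c = {0,0,0,0}, every lane is 4. */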
-
-/* vec_vmsummbm */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmsummbm(vector signed char __a, vector unsigned char __b, vector int __c) {
-  return __builtin_altivec_vmsummbm(__a, __b, __c);
-}
-
-/* vec_vmsumubm */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmsumubm(vector unsigned char __a, vector unsigned char __b,
-             vector unsigned int __c) {
-  return __builtin_altivec_vmsumubm(__a, __b, __c);
-}
-
-/* vec_vmsumshm */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmsumshm(vector short __a, vector short __b, vector int __c) {
-  return __builtin_altivec_vmsumshm(__a, __b, __c);
-}
-
-/* vec_vmsumuhm */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmsumuhm(vector unsigned short __a, vector unsigned short __b,
-             vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhm(__a, __b, __c);
-}
-
-/* vec_msums */
-
-static __inline__ vector int __ATTRS_o_ai vec_msums(vector short __a,
-                                                    vector short __b,
-                                                    vector int __c) {
-  return __builtin_altivec_vmsumshs(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_msums(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhs(__a, __b, __c);
-}
-
-/* vec_vmsumshs */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmsumshs(vector short __a, vector short __b, vector int __c) {
-  return __builtin_altivec_vmsumshs(__a, __b, __c);
-}
-
-/* vec_vmsumuhs */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmsumuhs(vector unsigned short __a, vector unsigned short __b,
-             vector unsigned int __c) {
-  return __builtin_altivec_vmsumuhs(__a, __b, __c);
-}
-
-/* vec_mtvscr */
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector signed char __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned char __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool char __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector short __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned short __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool short __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector pixel __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector int __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector unsigned int __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector bool int __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
-
-static __inline__ void __ATTRS_o_ai vec_mtvscr(vector float __a) {
-  __builtin_altivec_mtvscr((vector int)__a);
-}
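-
-/* Hypothetical usage sketch (not from the original header): vec_mfvscr and
-   vec_mtvscr read and write the Vector Status and Control Register, so
-   saving and restoring it looks like:
-     vector unsigned short __saved = vec_mfvscr();
-     ...code that may change VSCR state...
-     vec_mtvscr(__saved);
-*/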
-
-/* vec_mul */
-
-/* Integer vector multiplication multiplies the odd and even elements
-   separately, truncates each double-width product, and interleaves the
-   truncated results back into the result vector; a short sketch follows
-   the overloads below.
-*/
-static __inline__ vector signed char __ATTRS_o_ai
-vec_mul(vector signed char __a, vector signed char __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_mul(vector unsigned char __a, vector unsigned char __b) {
-  return __a * __b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_mul(vector signed short __a, vector signed short __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mul(vector unsigned short __a, vector unsigned short __b) {
-  return __a * __b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_mul(vector signed int __a, vector signed int __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mul(vector unsigned int __a, vector unsigned int __b) {
-  return __a * __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mul(vector signed long long __a, vector signed long long __b) {
-  return __a * __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mul(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a * __b;
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_mul(vector float __a,
-                                                    vector float __b) {
-  return __a * __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_mul(vector double __a,
-                                                     vector double __b) {
-  return __a * __b;
-}
-#endif
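-
-/* Illustrative sketch (the example promised above; not part of the original
-   header): vec_mul is a modular, truncating multiply in each lane.  For
-   unsigned char lanes a = 16, b = 17:
-     16 * 17 = 272 = 0x110, truncated to 8 bits -> 0x10. */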
-
-/* The vmulos* and vmules* instructions have a big endian bias, so
-   we must reverse the meaning of "even" and "odd" for little endian.  */
-
-/* vec_mule */
-
-static __inline__ vector short __ATTRS_o_ai vec_mule(vector signed char __a,
-                                                     vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosb(__a, __b);
-#else
-  return __builtin_altivec_vmulesb(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mule(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuloub(__a, __b);
-#else
-  return __builtin_altivec_vmuleub(__a, __b);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mule(vector short __a,
-                                                   vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosh(__a, __b);
-#else
-  return __builtin_altivec_vmulesh(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mule(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulouh(__a, __b);
-#else
-  return __builtin_altivec_vmuleuh(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mule(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosw(__a, __b);
-#else
-  return __builtin_altivec_vmulesw(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mule(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulouw(__a, __b);
-#else
-  return __builtin_altivec_vmuleuw(__a, __b);
-#endif
-}
-#endif
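-
-/* Illustrative note (not part of the original header): vec_mule multiplies
-   the even-indexed lanes into double-width results and vec_mulo (below) the
-   odd-indexed lanes, where "even"/"odd" refer to C element order; that is
-   why the builtin is swapped between big and little endian above.  For
-   signed chars a = {a0..a15}, b = {b0..b15}:
-     vec_mule(a, b) -> {a0*b0, a2*b2, ..., a14*b14}   (8 x 16-bit lanes)
-     vec_mulo(a, b) -> {a1*b1, a3*b3, ..., a15*b15}   (8 x 16-bit lanes) */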
-
-/* vec_vmulesb */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vmulesb(vector signed char __a, vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosb(__a, __b);
-#else
-  return __builtin_altivec_vmulesb(__a, __b);
-#endif
-}
-
-/* vec_vmuleub */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vmuleub(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuloub(__a, __b);
-#else
-  return __builtin_altivec_vmuleub(__a, __b);
-#endif
-}
-
-/* vec_vmulesh */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmulesh(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulosh(__a, __b);
-#else
-  return __builtin_altivec_vmulesh(__a, __b);
-#endif
-}
-
-/* vec_vmuleuh */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmuleuh(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulouh(__a, __b);
-#else
-  return __builtin_altivec_vmuleuh(__a, __b);
-#endif
-}
-
-/* vec_mulo */
-
-static __inline__ vector short __ATTRS_o_ai vec_mulo(vector signed char __a,
-                                                     vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesb(__a, __b);
-#else
-  return __builtin_altivec_vmulosb(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_mulo(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleub(__a, __b);
-#else
-  return __builtin_altivec_vmuloub(__a, __b);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_mulo(vector short __a,
-                                                   vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesh(__a, __b);
-#else
-  return __builtin_altivec_vmulosh(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_mulo(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleuh(__a, __b);
-#else
-  return __builtin_altivec_vmulouh(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_mulo(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesw(__a, __b);
-#else
-  return __builtin_altivec_vmulosw(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_mulo(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleuw(__a, __b);
-#else
-  return __builtin_altivec_vmulouw(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vmulosb */
-
-static __inline__ vector short __attribute__((__always_inline__))
-vec_vmulosb(vector signed char __a, vector signed char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesb(__a, __b);
-#else
-  return __builtin_altivec_vmulosb(__a, __b);
-#endif
-}
-
-/* vec_vmuloub */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vmuloub(vector unsigned char __a, vector unsigned char __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleub(__a, __b);
-#else
-  return __builtin_altivec_vmuloub(__a, __b);
-#endif
-}
-
-/* vec_vmulosh */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vmulosh(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmulesh(__a, __b);
-#else
-  return __builtin_altivec_vmulosh(__a, __b);
-#endif
-}
-
-/* vec_vmulouh */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vmulouh(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vmuleuh(__a, __b);
-#else
-  return __builtin_altivec_vmulouh(__a, __b);
-#endif
-}
-
-/* vec_nand */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nand(vector signed char __a, vector signed char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nand(vector signed char __a, vector bool char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nand(vector bool char __a, vector signed char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nand(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nand(vector unsigned char __a, vector bool char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nand(vector bool char __a, vector unsigned char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_nand(vector bool char __a,
-                                                         vector bool char __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_nand(vector signed short __a, vector signed short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_nand(vector signed short __a, vector bool short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_nand(vector bool short __a, vector signed short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_nand(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_nand(vector unsigned short __a, vector bool short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_nand(vector bool short __a, vector bool short __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_nand(vector signed int __a, vector signed int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
-                                                          vector bool int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_nand(vector bool int __a, vector signed int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nand(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nand(vector unsigned int __a, vector bool int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nand(vector bool int __a, vector unsigned int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
-                                                        vector bool int __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_nand(vector float __a, vector float __b) {
-  return (vector float)(~((vector unsigned int)__a &
-                          (vector unsigned int)__b));
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nand(vector signed long long __a, vector signed long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nand(vector signed long long __a, vector bool long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nand(vector bool long long __a, vector signed long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nand(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nand(vector unsigned long long __a, vector bool long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nand(vector bool long long __a, vector unsigned long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_nand(vector bool long long __a, vector bool long long __b) {
-  return ~(__a & __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_nand(vector double __a, vector double __b) {
-  return (vector double)(~((vector unsigned long long)__a &
-                           (vector unsigned long long)__b));
-}
-
-#endif
-
-/* vec_nmadd */
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai vec_nmadd(vector float __a,
-                                                      vector float __b,
-                                                      vector float __c) {
-  return __builtin_vsx_xvnmaddasp(__a, __b, __c);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_nmadd(vector double __a,
-                                                       vector double __b,
-                                                       vector double __c) {
-  return __builtin_vsx_xvnmaddadp(__a, __b, __c);
-}
-#endif
-
-/* vec_nmsub */
-
-static __inline__ vector float __ATTRS_o_ai vec_nmsub(vector float __a,
-                                                      vector float __b,
-                                                      vector float __c) {
-#ifdef __VSX__
-  return __builtin_vsx_xvnmsubasp(__a, __b, __c);
-#else
-  return __builtin_altivec_vnmsubfp(__a, __b, __c);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_nmsub(vector double __a,
-                                                       vector double __b,
-                                                       vector double __c) {
-  return __builtin_vsx_xvnmsubadp(__a, __b, __c);
-}
-#endif
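-
-/* Semantics sketch (a reader aid, not normative text): per lane the negated
-   fused forms compute
-     vec_nmadd(a, b, c) ~ -(a * b + c)
-     vec_nmsub(a, b, c) ~ -(a * b - c)
-   each with a single rounding, matching the underlying fused instructions. */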
-
-/* vec_vnmsubfp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vnmsubfp(vector float __a, vector float __b, vector float __c) {
-  return __builtin_altivec_vnmsubfp(__a, __b, __c);
-}
-
-/* vec_nor */
-
-#define __builtin_altivec_vnor vec_nor
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_nor(vector signed char __a, vector signed char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_nor(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_nor(vector bool char __a,
-                                                        vector bool char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_nor(vector short __a,
-                                                    vector short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_nor(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_nor(vector bool short __a, vector bool short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_nor(vector int __a,
-                                                  vector int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_nor(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_nor(vector bool int __a,
-                                                       vector bool int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_nor(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      ~((vector unsigned int)__a | (vector unsigned int)__b);
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_nor(vector double __a,
-                                                     vector double __b) {
-  vector unsigned long long __res =
-      ~((vector unsigned long long)__a | (vector unsigned long long)__b);
-  return (vector double)__res;
-}
-#endif
-
-/* vec_vnor */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vnor(vector signed char __a, vector signed char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vnor(vector unsigned char __a, vector unsigned char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vnor(vector bool char __a,
-                                                         vector bool char __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vnor(vector short __a,
-                                                     vector short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vnor(vector unsigned short __a, vector unsigned short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vnor(vector bool short __a, vector bool short __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vnor(vector int __a,
-                                                   vector int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vnor(vector unsigned int __a, vector unsigned int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vnor(vector bool int __a,
-                                                        vector bool int __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vnor(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      ~((vector unsigned int)__a | (vector unsigned int)__b);
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_nor(vector signed long long __a, vector signed long long __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_nor(vector unsigned long long __a, vector unsigned long long __b) {
-  return ~(__a | __b);
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_nor(vector bool long long __a, vector bool long long __b) {
-  return ~(__a | __b);
-}
-#endif
-
-/* vec_or */
-
-#define __builtin_altivec_vor vec_or
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_or(vector signed char __a, vector signed char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_or(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai vec_or(vector signed char __a,
-                                                         vector bool char __b) {
-  return __a | (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_or(vector unsigned char __a, vector unsigned char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_or(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_or(vector unsigned char __a, vector bool char __b) {
-  return __a | (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_or(vector bool char __a,
-                                                       vector bool char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a,
-                                                   vector short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_or(vector bool short __a,
-                                                   vector short __b) {
-  return (vector short)__a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_or(vector short __a,
-                                                   vector bool short __b) {
-  return __a | (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_or(vector unsigned short __a, vector unsigned short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_or(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_or(vector unsigned short __a, vector bool short __b) {
-  return __a | (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_or(vector bool short __a,
-                                                        vector bool short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a,
-                                                 vector int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_or(vector bool int __a,
-                                                 vector int __b) {
-  return (vector int)__a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_or(vector int __a,
-                                                 vector bool int __b) {
-  return __a | (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_or(vector unsigned int __a, vector unsigned int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_or(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_or(vector unsigned int __a, vector bool int __b) {
-  return __a | (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_or(vector bool int __a,
-                                                      vector bool int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a,
-                                                   vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_or(vector bool int __a,
-                                                   vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_or(vector float __a,
-                                                   vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_or(vector bool long long __a,
-                                                    vector double __b) {
-  return (vector double)((vector unsigned long long)__a |
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
-                                                    vector bool long long __b) {
-  return (vector double)((vector unsigned long long)__a |
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_or(vector double __a,
-                                                    vector double __b) {
-  return (vector double)((vector unsigned long long)__a |
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_or(vector signed long long __a, vector signed long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_or(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_or(vector signed long long __a, vector bool long long __b) {
-  return __a | (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_or(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_or(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_or(vector unsigned long long __a, vector bool long long __b) {
-  return __a | (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_or(vector bool long long __a, vector bool long long __b) {
-  return __a | __b;
-}
-#endif
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_orc(vector signed char __a, vector signed char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_orc(vector signed char __a, vector bool char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_orc(vector bool char __a, vector signed char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_orc(vector unsigned char __a, vector unsigned char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_orc(vector unsigned char __a, vector bool char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_orc(vector bool char __a, vector unsigned char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_orc(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_orc(vector signed short __a, vector signed short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_orc(vector signed short __a, vector bool short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_orc(vector bool short __a, vector signed short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_orc(vector unsigned short __a, vector unsigned short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_orc(vector unsigned short __a, vector bool short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_orc(vector bool short __a, vector unsigned short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_orc(vector bool short __a, vector bool short __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_orc(vector signed int __a, vector signed int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
-                                                         vector bool int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_orc(vector bool int __a, vector signed int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_orc(vector unsigned int __a, vector unsigned int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_orc(vector unsigned int __a, vector bool int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_orc(vector bool int __a, vector unsigned int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_orc(vector bool int __a, vector float __b) {
-  return (vector float)(__a | ~(vector unsigned int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_orc(vector float __a, vector bool int __b) {
-  return (vector float)((vector unsigned int)__a | ~__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_orc(vector signed long long __a, vector signed long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_orc(vector signed long long __a, vector bool long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector signed long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_orc(vector unsigned long long __a, vector bool long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector unsigned long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector bool long long __b) {
-  return __a | ~__b;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_orc(vector double __a, vector bool long long __b) {
-  return (vector double)((vector unsigned long long)__a | ~__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_orc(vector bool long long __a, vector double __b) {
-  return (vector double)(__a | ~(vector unsigned long long)__b);
-}
-#endif
-
-/* vec_vor */
-
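-/* The vec_v* entry points in this and the following sections are the
-   instruction-named spellings of the generic vec_* functions above; their
-   bodies are identical. */
-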
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vor(vector signed char __a, vector signed char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vor(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a | __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vor(vector signed char __a, vector bool char __b) {
-  return __a | (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vor(vector unsigned char __a, vector unsigned char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vor(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a | __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vor(vector unsigned char __a, vector bool char __b) {
-  return __a | (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vor(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a,
-                                                    vector short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vor(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a | __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vor(vector short __a,
-                                                    vector bool short __b) {
-  return __a | (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vor(vector unsigned short __a, vector unsigned short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vor(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a | __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vor(vector unsigned short __a, vector bool short __b) {
-  return __a | (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vor(vector bool short __a, vector bool short __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a,
-                                                  vector int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vor(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a | __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vor(vector int __a,
-                                                  vector bool int __b) {
-  return __a | (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vor(vector unsigned int __a, vector unsigned int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vor(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a | __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vor(vector unsigned int __a, vector bool int __b) {
-  return __a | (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vor(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a | __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vor(vector bool int __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vor(vector float __a,
-                                                    vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a | (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vor(vector signed long long __a, vector signed long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vor(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a | __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vor(vector signed long long __a, vector bool long long __b) {
-  return __a | (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vor(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vor(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a | __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vor(vector unsigned long long __a, vector bool long long __b) {
-  return __a | (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vor(vector bool long long __a, vector bool long long __b) {
-  return __a | __b;
-}
-#endif
-
-/* vec_pack */
-
-/* The various vector pack instructions have a big-endian bias, so for
-   little endian we must handle reversed element numbering.  */
-
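-/* Example (an illustrative sketch, not part of the original header): the
-   pack truncates each element to its low-order half, taking the elements
-   of __a followed by the elements of __b:
-
-     vector signed short __a = {0x0101, 0x0202, 0x0303, 0x0404,
-                                0x0505, 0x0606, 0x0707, 0x0808};
-     vector signed short __b = {0x0909, 0x0A0A, 0x0B0B, 0x0C0C,
-                                0x0D0D, 0x0E0E, 0x0F0F, 0x1010};
-     vector signed char __r = vec_pack(__a, __b);
-
-   __r is {0x01, 0x02, ..., 0x10} on either endianness; only the byte
-   offsets fed to vec_perm differ, because the low byte of a halfword sits
-   at an even offset on little endian and at an odd one on big endian. */
-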
-static __inline__ vector signed char __ATTRS_o_ai
-vec_pack(vector signed short __a, vector signed short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_pack(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_pack(vector bool short __a, vector bool short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_pack(vector int __a,
-                                                     vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_pack(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_pack(vector bool int __a,
-                                                          vector bool int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector signed int __ATTRS_o_ai
-vec_pack(vector signed long long __a, vector signed long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector signed int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector signed int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_pack(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_pack(vector bool long long __a, vector bool long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector bool int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_pack(vector double __a, vector double __b) {
-  return (vector float)(__a[0], __a[1], __b[0], __b[1]);
-}
-#endif
-
-#ifdef __POWER9_VECTOR__
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_pack_to_short_fp32(vector float __a, vector float __b) {
-  vector float __resa = __builtin_vsx_xvcvsphp(__a);
-  vector float __resb = __builtin_vsx_xvcvsphp(__b);
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned short)vec_mergee(__resa, __resb);
-#else
-  return (vector unsigned short)vec_mergeo(__resa, __resb);
-#endif
-}
-
-#endif
-
-/* vec_vpkuhum */
-
-#define __builtin_altivec_vpkuhum vec_vpkuhum
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vpkuhum(vector signed short __a, vector signed short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector signed char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vpkuhum(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector unsigned char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vpkuhum(vector bool short __a, vector bool short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E,
-                             0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E));
-#else
-  return (vector bool char)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
-                             0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
-#endif
-}
-
-/* vec_vpkuwum */
-
-#define __builtin_altivec_vpkuwum vec_vpkuwum
-
-static __inline__ vector short __ATTRS_o_ai vec_vpkuwum(vector int __a,
-                                                        vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vpkuwum(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector unsigned short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vpkuwum(vector bool int __a, vector bool int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x04, 0x05, 0x08, 0x09, 0x0C, 0x0D,
-                             0x10, 0x11, 0x14, 0x15, 0x18, 0x19, 0x1C, 0x1D));
-#else
-  return (vector bool short)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
-                             0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
-#endif
-}
-
-/* vec_vpkudum */
-
-#ifdef __POWER8_VECTOR__
-#define __builtin_altivec_vpkudum vec_vpkudum
-
-static __inline__ vector int __ATTRS_o_ai vec_vpkudum(vector long long __a,
-                                                      vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vpkudum(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector unsigned int)vec_perm(
-      __a, __b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vpkudum(vector bool long long __a, vector bool long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)vec_perm(
-      (vector long long)__a, (vector long long)__b,
-      (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0A, 0x0B,
-                             0x10, 0x11, 0x12, 0x13, 0x18, 0x19, 0x1A, 0x1B));
-#else
-  return (vector bool int)vec_perm(
-      (vector long long)__a, (vector long long)__b,
-      (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D, 0x0E, 0x0F,
-                             0x14, 0x15, 0x16, 0x17, 0x1C, 0x1D, 0x1E, 0x1F));
-#endif
-}
-#endif
-
-/* vec_packpx */
-
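-/* vpkpx packs each 32-bit pixel into a 16-bit 1/5/5/5 pixel, keeping the
-   low-order bit of the first byte and the five high-order bits of each of
-   the other three bytes. As with the other packs, the operands are swapped
-   for little endian so the result elements come out in element order. */
-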
-static __inline__ vector pixel __attribute__((__always_inline__))
-vec_packpx(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
-#else
-  return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
-#endif
-}
-
-/* vec_vpkpx */
-
-static __inline__ vector pixel __attribute__((__always_inline__))
-vec_vpkpx(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector pixel)__builtin_altivec_vpkpx(__b, __a);
-#else
-  return (vector pixel)__builtin_altivec_vpkpx(__a, __b);
-#endif
-}
-
-/* vec_packs */
-
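-/* Unlike the modular vec_pack above, these are saturating packs (vpk*ss and
-   vpk*us): elements that do not fit the narrower type clamp to its minimum
-   or maximum instead of being truncated. */
-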
-static __inline__ vector signed char __ATTRS_o_ai vec_packs(vector short __a,
-                                                            vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshss(__b, __a);
-#else
-  return __builtin_altivec_vpkshss(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_packs(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_packs(vector int __a,
-                                                             vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswss(__b, __a);
-#else
-  return __builtin_altivec_vpkswss(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_packs(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector int __ATTRS_o_ai vec_packs(vector long long __a,
-                                                    vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdss(__b, __a);
-#else
-  return __builtin_altivec_vpksdss(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_packs(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkudus(__b, __a);
-#else
-  return __builtin_altivec_vpkudus(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vpkshss */
-
-static __inline__ vector signed char __attribute__((__always_inline__))
-vec_vpkshss(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshss(__b, __a);
-#else
-  return __builtin_altivec_vpkshss(__a, __b);
-#endif
-}
-
-/* vec_vpksdss */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector int __ATTRS_o_ai vec_vpksdss(vector long long __a,
-                                                      vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdss(__b, __a);
-#else
-  return __builtin_altivec_vpksdss(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vpkuhus */
-
-static __inline__ vector unsigned char __attribute__((__always_inline__))
-vec_vpkuhus(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-/* vec_vpkudus */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vpkudus(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkudus(__b, __a);
-#else
-  return __builtin_altivec_vpkudus(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vpkswss */
-
-static __inline__ vector signed short __attribute__((__always_inline__))
-vec_vpkswss(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswss(__b, __a);
-#else
-  return __builtin_altivec_vpkswss(__a, __b);
-#endif
-}
-
-/* vec_vpkuwus */
-
-static __inline__ vector unsigned short __attribute__((__always_inline__))
-vec_vpkuwus(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-/* vec_packsu */
-
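-/* Saturating pack to an unsigned result: negative signed inputs clamp to
-   zero (vpkshus/vpkswus/vpksdus), and unsigned inputs saturate at the
-   maximum of the narrower type (vpkuhus/vpkuwus/vpkudus). */
-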
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_packsu(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshus(__b, __a);
-#else
-  return __builtin_altivec_vpkshus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_packsu(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_packsu(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswus(__b, __a);
-#else
-  return __builtin_altivec_vpkswus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_packsu(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_packsu(vector long long __a, vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdus(__b, __a);
-#else
-  return __builtin_altivec_vpksdus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_packsu(vector unsigned long long __a, vector unsigned long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkudus(__b, __a);
-#else
-  return __builtin_altivec_vpkudus(__a, __b);
-#endif
-}
-#endif
-
-/* vec_vpkshus */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vpkshus(vector short __a, vector short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkshus(__b, __a);
-#else
-  return __builtin_altivec_vpkshus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vpkshus(vector unsigned short __a, vector unsigned short __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuhus(__b, __a);
-#else
-  return __builtin_altivec_vpkuhus(__a, __b);
-#endif
-}
-
-/* vec_vpkswus */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vpkswus(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkswus(__b, __a);
-#else
-  return __builtin_altivec_vpkswus(__a, __b);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vpkswus(vector unsigned int __a, vector unsigned int __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpkuwus(__b, __a);
-#else
-  return __builtin_altivec_vpkuwus(__a, __b);
-#endif
-}
-
-/* vec_vpksdus */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vpksdus(vector long long __a, vector long long __b) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vpksdus(__b, __a);
-#else
-  return __builtin_altivec_vpksdus(__a, __b);
-#endif
-}
-#endif
-
-/* vec_perm */
-
-// The vperm instruction is defined architecturally with a big-endian bias.
-// For little endian, we swap the input operands and invert the permute
-// control vector.  Only the rightmost 5 bits matter, so we could use
-// a vector of all 31s instead of all 255s to perform the inversion.
-// However, when the PCV is not a constant, using 255 has an advantage
-// in that the vec_xor can be recognized as a vec_nor (and for P8 and
-// later, possibly a vec_nand).
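-//
-// A sketch of why the inversion works (informal, using 5-bit indices):
-// big-endian vperm selects byte (__c[i] & 31) from the 32-byte concatenation
-// {__a, __b}. XORing an index with 31 maps k to 31 - k, the same byte
-// counted from the opposite end, and swapping the operands reverses the
-// concatenation, so vperm(__b, __a, __c ^ 255) reads exactly the bytes the
-// little-endian caller asked for.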
-
-static __inline__ vector signed char __ATTRS_o_ai vec_perm(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed char)__builtin_altivec_vperm_4si((vector int)__b,
-                                                         (vector int)__a, __d);
-#else
-  return (vector signed char)__builtin_altivec_vperm_4si((vector int)__a,
-                                                         (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_perm(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned char)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector unsigned char)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_perm(vector bool char __a, vector bool char __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool char)__builtin_altivec_vperm_4si((vector int)__b,
-                                                       (vector int)__a, __d);
-#else
-  return (vector bool char)__builtin_altivec_vperm_4si((vector int)__a,
-                                                       (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_perm(vector signed short __a,
-                                                     vector signed short __b,
-                                                     vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed short)__builtin_altivec_vperm_4si((vector int)__b,
-                                                          (vector int)__a, __d);
-#else
-  return (vector signed short)__builtin_altivec_vperm_4si((vector int)__a,
-                                                          (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_perm(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned short)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector unsigned short)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_perm(
-    vector bool short __a, vector bool short __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool short)__builtin_altivec_vperm_4si((vector int)__b,
-                                                        (vector int)__a, __d);
-#else
-  return (vector bool short)__builtin_altivec_vperm_4si((vector int)__a,
-                                                        (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_perm(vector pixel __a,
-                                                     vector pixel __b,
-                                                     vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector pixel)__builtin_altivec_vperm_4si((vector int)__b,
-                                                   (vector int)__a, __d);
-#else
-  return (vector pixel)__builtin_altivec_vperm_4si((vector int)__a,
-                                                   (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_perm(vector signed int __a,
-                                                   vector signed int __b,
-                                                   vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed int)__builtin_altivec_vperm_4si(__b, __a, __d);
-#else
-  return (vector signed int)__builtin_altivec_vperm_4si(__a, __b, __c);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_perm(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__b,
-                                                          (vector int)__a, __d);
-#else
-  return (vector unsigned int)__builtin_altivec_vperm_4si((vector int)__a,
-                                                          (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_perm(vector bool int __a, vector bool int __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool int)__builtin_altivec_vperm_4si((vector int)__b,
-                                                      (vector int)__a, __d);
-#else
-  return (vector bool int)__builtin_altivec_vperm_4si((vector int)__a,
-                                                      (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_perm(vector float __a,
-                                                     vector float __b,
-                                                     vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector float)__builtin_altivec_vperm_4si((vector int)__b,
-                                                   (vector int)__a, __d);
-#else
-  return (vector float)__builtin_altivec_vperm_4si((vector int)__a,
-                                                   (vector int)__b, __c);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector long long __ATTRS_o_ai
-vec_perm(vector signed long long __a, vector signed long long __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector signed long long)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector signed long long)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_perm(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector unsigned long long)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector unsigned long long)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_perm(vector bool long long __a, vector bool long long __b,
-         vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector bool long long)__builtin_altivec_vperm_4si(
-      (vector int)__b, (vector int)__a, __d);
-#else
-  return (vector bool long long)__builtin_altivec_vperm_4si(
-      (vector int)__a, (vector int)__b, __c);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_perm(vector double __a, vector double __b, vector unsigned char __c) {
-#ifdef __LITTLE_ENDIAN__
-  vector unsigned char __d = {255, 255, 255, 255, 255, 255, 255, 255,
-                              255, 255, 255, 255, 255, 255, 255, 255};
-  __d = vec_xor(__c, __d);
-  return (vector double)__builtin_altivec_vperm_4si((vector int)__b,
-                                                    (vector int)__a, __d);
-#else
-  return (vector double)__builtin_altivec_vperm_4si((vector int)__a,
-                                                    (vector int)__b, __c);
-#endif
-}
-#endif
-
-/* vec_vperm */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_vperm(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vperm(vector unsigned char __a, vector unsigned char __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vperm(
-    vector bool char __a, vector bool char __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vperm(vector short __a, vector short __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vperm(vector unsigned short __a, vector unsigned short __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_vperm(
-    vector bool short __a, vector bool short __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai
-vec_vperm(vector pixel __a, vector pixel __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vperm(vector int __a,
-                                                    vector int __b,
-                                                    vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vperm(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vperm(vector bool int __a, vector bool int __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_vperm(vector float __a, vector float __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-#ifdef __VSX__
-static __inline__ vector long long __ATTRS_o_ai vec_vperm(
-    vector long long __a, vector long long __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vperm(vector unsigned long long __a, vector unsigned long long __b,
-          vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_vperm(vector double __a, vector double __b, vector unsigned char __c) {
-  return vec_perm(__a, __b, __c);
-}
-#endif
-
-/* vec_re */
-
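-/* Elementwise reciprocal estimate (~1/x): VSX targets use xvresp/xvredp,
-   plain AltiVec falls back to vrefp. The result is an estimate, not a
-   correctly rounded quotient. */
-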
-static __inline__ vector float __ATTRS_o_ai vec_re(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvresp(__a);
-#else
-  return __builtin_altivec_vrefp(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_re(vector double __a) {
-  return __builtin_vsx_xvredp(__a);
-}
-#endif
-
-/* vec_vrefp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrefp(vector float __a) {
-  return __builtin_altivec_vrefp(__a);
-}
-
-/* vec_rl */
-
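-/* Each element of __a is rotated left by the corresponding element of __b;
-   in effect only the low log2(element-width) bits of each count matter. */
-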
-static __inline__ vector signed char __ATTRS_o_ai
-vec_rl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_rl(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_rl(vector short __a,
-                                                   vector unsigned short __b) {
-  return __builtin_altivec_vrlh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_rl(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_rl(vector int __a,
-                                                 vector unsigned int __b) {
-  return __builtin_altivec_vrlw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_rl(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_rl(vector signed long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vrld(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vrld(__a, __b);
-}
-#endif
-
-/* vec_rlmi */
-#ifdef __POWER9_VECTOR__
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_rlmi(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  return __builtin_altivec_vrlwmi(__a, __c, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_rlmi(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned long long __c) {
-  return __builtin_altivec_vrldmi(__a, __c, __b);
-}
-
-/* vec_rlnm */
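-/* vrlwnm/vrldnm take a single control operand per element, so the mask
-   begin/end specifier in __c is shifted up one byte and merged with the
-   rotate counts in __b before the builtin call (a note inferred from the
-   construction below). */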
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_rlnm(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  vector unsigned int OneByte = { 0x8, 0x8, 0x8, 0x8 };
-  return __builtin_altivec_vrlwnm(__a, ((__c << OneByte) | __b));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_rlnm(vector unsigned long long __a, vector unsigned long long __b,
-         vector unsigned long long __c) {
-  vector unsigned long long OneByte = { 0x8, 0x8 };
-  return __builtin_altivec_vrldnm(__a, ((__c << OneByte) | __b));
-}
-#endif
-
-/* vec_vrlb */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vrlb(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vrlb(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vrlb((vector char)__a, __b);
-}
-
-/* vec_vrlh */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vrlh(vector short __a, vector unsigned short __b) {
-  return __builtin_altivec_vrlh(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vrlh(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vrlh((vector short)__a, __b);
-}
-
-/* vec_vrlw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vrlw(vector int __a,
-                                                   vector unsigned int __b) {
-  return __builtin_altivec_vrlw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vrlw(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vrlw((vector int)__a, __b);
-}
-
-/* vec_round */
-
-static __inline__ vector float __ATTRS_o_ai vec_round(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrspi(__a);
-#else
-  return __builtin_altivec_vrfin(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_round(vector double __a) {
-  return __builtin_vsx_xvrdpi(__a);
-}
-
-/* vec_rint */
-
-static __inline__ vector float __ATTRS_o_ai vec_rint(vector float __a) {
-  return __builtin_vsx_xvrspic(__a);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_rint(vector double __a) {
-  return __builtin_vsx_xvrdpic(__a);
-}
-
-/* vec_nearbyint */
-
-static __inline__ vector float __ATTRS_o_ai vec_nearbyint(vector float __a) {
-  return __builtin_vsx_xvrspi(__a);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_nearbyint(vector double __a) {
-  return __builtin_vsx_xvrdpi(__a);
-}
-#endif
-
-/* vec_vrfin */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfin(vector float __a) {
-  return __builtin_altivec_vrfin(__a);
-}
-
-/* vec_sqrt */
-
-#ifdef __VSX__
-static __inline__ vector float __ATTRS_o_ai vec_sqrt(vector float __a) {
-  return __builtin_vsx_xvsqrtsp(__a);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_sqrt(vector double __a) {
-  return __builtin_vsx_xvsqrtdp(__a);
-}
-#endif
-
-/* vec_rsqrte */
-
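-/* Elementwise reciprocal square-root estimate (~1/sqrt(x)), mirroring
-   vec_re above. */
-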
-static __inline__ vector float __ATTRS_o_ai vec_rsqrte(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrsqrtesp(__a);
-#else
-  return __builtin_altivec_vrsqrtefp(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_rsqrte(vector double __a) {
-  return __builtin_vsx_xvrsqrtedp(__a);
-}
-#endif
-
-/* vec_vrsqrtefp */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrsqrtefp(vector float __a) {
-  return __builtin_altivec_vrsqrtefp(__a);
-}
-
-/* vec_sel */
-
-#define __builtin_altivec_vsel_4si vec_sel
-
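-/* Each overload below is a bitwise select: result bits come from __b where
-   the corresponding bit of __c is set, and from __a where it is clear,
-   i.e. (__a & ~__c) | (__b & __c). */
-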
-static __inline__ vector signed char __ATTRS_o_ai vec_sel(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sel(vector signed char __a, vector signed char __b, vector bool char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sel(vector unsigned char __a, vector unsigned char __b,
-        vector unsigned char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai vec_sel(
-    vector unsigned char __a, vector unsigned char __b, vector bool char __c) {
-  return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
-  return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_sel(vector bool char __a,
-                                                        vector bool char __b,
-                                                        vector bool char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a,
-                                                    vector short __b,
-                                                    vector unsigned short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sel(vector short __a,
-                                                    vector short __b,
-                                                    vector bool short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sel(vector unsigned short __a, vector unsigned short __b,
-        vector unsigned short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sel(vector unsigned short __a, vector unsigned short __b,
-        vector bool short __c) {
-  return (__a & ~(vector unsigned short)__c) |
-         (__b & (vector unsigned short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_sel(
-    vector bool short __a, vector bool short __b, vector unsigned short __c) {
-  return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sel(vector bool short __a, vector bool short __b, vector bool short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a,
-                                                  vector int __b,
-                                                  vector unsigned int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sel(vector int __a,
-                                                  vector int __b,
-                                                  vector bool int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_sel(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sel(vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
-  return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
-  return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_sel(vector bool int __a,
-                                                       vector bool int __b,
-                                                       vector bool int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a,
-                                                    vector float __b,
-                                                    vector unsigned int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sel(vector float __a,
-                                                    vector float __b,
-                                                    vector bool int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai
-vec_sel(vector double __a, vector double __b, vector bool long long __c) {
-  vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
-                           ((vector long long)__b & (vector long long)__c);
-  return (vector double)__res;
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_sel(vector double __a, vector double __b, vector unsigned long long __c) {
-  vector long long __res = ((vector long long)__a & ~(vector long long)__c) |
-                           ((vector long long)__b & (vector long long)__c);
-  return (vector double)__res;
-}
-#endif
-
-/* vec_vsel */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_vsel(
-    vector signed char __a, vector signed char __b, vector unsigned char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsel(vector signed char __a, vector signed char __b, vector bool char __c) {
-  return (__a & ~(vector signed char)__c) | (__b & (vector signed char)__c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsel(vector unsigned char __a, vector unsigned char __b,
-         vector unsigned char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai vec_vsel(
-    vector unsigned char __a, vector unsigned char __b, vector bool char __c) {
-  return (__a & ~(vector unsigned char)__c) | (__b & (vector unsigned char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsel(vector bool char __a, vector bool char __b, vector unsigned char __c) {
-  return (__a & ~(vector bool char)__c) | (__b & (vector bool char)__c);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vsel(vector bool char __a,
-                                                         vector bool char __b,
-                                                         vector bool char __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vsel(vector short __a, vector short __b, vector unsigned short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsel(vector short __a,
-                                                     vector short __b,
-                                                     vector bool short __c) {
-  return (__a & ~(vector short)__c) | (__b & (vector short)__c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsel(vector unsigned short __a, vector unsigned short __b,
-         vector unsigned short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsel(vector unsigned short __a, vector unsigned short __b,
-         vector bool short __c) {
-  return (__a & ~(vector unsigned short)__c) |
-         (__b & (vector unsigned short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai vec_vsel(
-    vector bool short __a, vector bool short __b, vector unsigned short __c) {
-  return (__a & ~(vector bool short)__c) | (__b & (vector bool short)__c);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsel(vector bool short __a, vector bool short __b, vector bool short __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a,
-                                                   vector int __b,
-                                                   vector unsigned int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsel(vector int __a,
-                                                   vector int __b,
-                                                   vector bool int __c) {
-  return (__a & ~(vector int)__c) | (__b & (vector int)__c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_vsel(
-    vector unsigned int __a, vector unsigned int __b, vector bool int __c) {
-  return (__a & ~(vector unsigned int)__c) | (__b & (vector unsigned int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsel(vector bool int __a, vector bool int __b, vector unsigned int __c) {
-  return (__a & ~(vector bool int)__c) | (__b & (vector bool int)__c);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vsel(vector bool int __a,
-                                                        vector bool int __b,
-                                                        vector bool int __c) {
-  return (__a & ~__c) | (__b & __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsel(vector float __a,
-                                                     vector float __b,
-                                                     vector unsigned int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsel(vector float __a,
-                                                     vector float __b,
-                                                     vector bool int __c) {
-  vector int __res = ((vector int)__a & ~(vector int)__c) |
-                     ((vector int)__b & (vector int)__c);
-  return (vector float)__res;
-}
-
-/* vec_sl */
-
-// vec_sl reduces each shift count in __b modulo the element width first, so a
-// count in __b greater than the number of bits per element of __a is allowed
-// (see the sketch after this group).
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sl(vector unsigned char __a, vector unsigned char __b) {
-  return __a << (__b %
-                 (vector unsigned char)(sizeof(unsigned char) * __CHAR_BIT__));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)vec_sl((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sl(vector unsigned short __a, vector unsigned short __b) {
-  return __a << (__b % (vector unsigned short)(sizeof(unsigned short) *
-                                               __CHAR_BIT__));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sl(vector short __a,
-                                                   vector unsigned short __b) {
-  return (vector short)vec_sl((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sl(vector unsigned int __a, vector unsigned int __b) {
-  return __a << (__b %
-                 (vector unsigned int)(sizeof(unsigned int) * __CHAR_BIT__));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sl(vector int __a,
-                                                 vector unsigned int __b) {
-  return (vector int)vec_sl((vector unsigned int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sl(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a << (__b % (vector unsigned long long)(sizeof(unsigned long long) *
-                                                   __CHAR_BIT__));
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_sl(vector long long __a, vector unsigned long long __b) {
-  return (vector long long)vec_sl((vector unsigned long long)__a, __b);
-}
-#endif
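
Because the counts are reduced modulo the element width, an out-of-range count
simply wraps: shifting a 32-bit lane by 33 behaves like shifting by 1. A small
sketch with hypothetical values:

  #include <altivec.h>

  static vector unsigned int sl_demo(void) {
    vector unsigned int a = (vector unsigned int)(1, 1, 1, 1);
    vector unsigned int n = (vector unsigned int)(1, 31, 32, 33);
    return vec_sl(a, n); // (2, 0x80000000, 1, 2): 32 % 32 == 0, 33 % 32 == 1
  }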
-
-/* vec_vslb */
-
-#define __builtin_altivec_vslb vec_vslb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vslb(vector signed char __a, vector unsigned char __b) {
-  return vec_sl(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vslb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_sl(__a, __b);
-}
-
-/* vec_vslh */
-
-#define __builtin_altivec_vslh vec_vslh
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vslh(vector short __a, vector unsigned short __b) {
-  return vec_sl(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vslh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_sl(__a, __b);
-}
-
-/* vec_vslw */
-
-#define __builtin_altivec_vslw vec_vslw
-
-static __inline__ vector int __ATTRS_o_ai vec_vslw(vector int __a,
-                                                   vector unsigned int __b) {
-  return vec_sl(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vslw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_sl(__a, __b);
-}
-
-/* vec_sld */
-
-#define __builtin_altivec_vsldoi_4si vec_sld
-
-static __inline__ vector signed char __ATTRS_o_ai vec_sld(
-    vector signed char __a, vector signed char __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sld(vector unsigned char __a, vector unsigned char __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sld(vector bool char __a, vector bool char __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_sld(
-    vector signed short __a, vector signed short __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sld(vector unsigned short __a, vector unsigned short __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sld(vector bool short __a, vector bool short __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sld(vector pixel __a,
-                                                    vector pixel __b,
-                                                    unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sld(vector signed int __a, vector signed int __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_sld(
-    vector unsigned int __a, vector unsigned int __b, unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_sld(vector bool int __a,
-                                                       vector bool int __b,
-                                                       unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sld(vector float __a,
-                                                    vector float __b,
-                                                    unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_sld(vector bool long long __a, vector bool long long __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sld(vector signed long long __a, vector signed long long __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sld(vector unsigned long long __a, vector unsigned long long __b,
-        unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_sld(vector double __a,
-                                                     vector double __b,
-                                                     unsigned const int __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-#endif
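
vec_sld extracts 16 contiguous bytes from the concatenation of its operands,
starting at offset __c & 0x0F. On a big-endian target that is literally bytes
__c..__c+15 of __a:__b; the little-endian branch computes the mirrored permute.
A sketch with hypothetical values, assuming big-endian element order:

  #include <altivec.h>

  static vector unsigned char sld_demo(void) {
    vector unsigned char a = (vector unsigned char)(0, 1, 2, 3, 4, 5, 6, 7, 8,
                                                    9, 10, 11, 12, 13, 14, 15);
    vector unsigned char b = (vector unsigned char)(16, 17, 18, 19, 20, 21, 22,
                                                    23, 24, 25, 26, 27, 28, 29,
                                                    30, 31);
    return vec_sld(a, b, 4); // bytes 4..19 of a:b, i.e. (4, 5, ..., 19)
  }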
-
-/* vec_sldw */
-static __inline__ vector signed char __ATTRS_o_ai vec_sldw(
-    vector signed char __a, vector signed char __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sldw(vector unsigned char __a, vector unsigned char __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai vec_sldw(
-    vector signed short __a, vector signed short __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sldw(vector unsigned short __a, vector unsigned short __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sldw(vector signed int __a, vector signed int __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_sldw(
-    vector unsigned int __a, vector unsigned int __b, unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sldw(vector signed long long __a, vector signed long long __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sldw(vector unsigned long long __a, vector unsigned long long __b,
-         unsigned const int __c) {
-  return vec_sld(__a, __b, ((__c << 2) & 0x0F));
-}
-#endif
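
vec_sldw is the word-granular variant: a shift by __c words is a shift by
4 * __c bytes, which is exactly the ((__c << 2) & 0x0F) forwarded above. For
instance:

  #include <altivec.h>

  static vector signed int sldw_demo(vector signed int a, vector signed int b) {
    return vec_sldw(a, b, 1); // equivalent to vec_sld(a, b, 4)
  }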
-
-#ifdef __POWER9_VECTOR__
-/* vec_slv */
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_slv(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vslv(__a, __b);
-}
-
-/* vec_srv */
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srv(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsrv(__a, __b);
-}
-#endif
-
-/* vec_vsldoi */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsldoi(vector signed char __a, vector signed char __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai vec_vsldoi(
-    vector unsigned char __a, vector unsigned char __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsldoi(vector short __a,
-                                                       vector short __b,
-                                                       unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai vec_vsldoi(
-    vector unsigned short __a, vector unsigned short __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsldoi(vector pixel __a,
-                                                       vector pixel __b,
-                                                       unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsldoi(vector int __a,
-                                                     vector int __b,
-                                                     unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_vsldoi(
-    vector unsigned int __a, vector unsigned int __b, unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsldoi(vector float __a,
-                                                       vector float __b,
-                                                       unsigned char __c) {
-  unsigned char __d = __c & 0x0F;
-#ifdef __LITTLE_ENDIAN__
-  return vec_perm(
-      __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
-                                       20 - __d, 21 - __d, 22 - __d, 23 - __d,
-                                       24 - __d, 25 - __d, 26 - __d, 27 - __d,
-                                       28 - __d, 29 - __d, 30 - __d, 31 - __d));
-#else
-  return vec_perm(
-      __a, __b,
-      (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
-                             __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
-                             __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
-#endif
-}
-
-/* vec_sll */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sll(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sll(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sll(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sll(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sll(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sll(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sll(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sll(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_sll(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sll(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sll(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sll(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sll(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sll(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sll(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_sll(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sll(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sll(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sll(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sll(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sll(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sll(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sll(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_sll(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sll(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vsl((vector int)__a,
-                                                        (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sll(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsl((vector int)__a,
-                                                          (vector int)__b);
-}
-#endif
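
Unlike vec_sl, vec_sll shifts the whole 128-bit register left by 0-7 bits. The
hardware reads the count from the low-order bits of __b and expects the same
count replicated in every byte, which vec_splat_u8 conveniently provides. A
minimal sketch:

  #include <altivec.h>

  static vector unsigned char sll_demo(vector unsigned char a) {
    vector unsigned char n = vec_splat_u8(3); // identical count in every byte
    return vec_sll(a, n);                     // a << 3 as one 128-bit value
  }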
-
-/* vec_vsl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsl(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsl(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsl((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsl(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsl(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsl(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsl((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsl(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsl(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsl(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsl((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsl(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsl(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsl(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsl(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsl((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsl(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsl(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsl(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsl((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsl(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsl((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsl(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsl(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsl(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsl(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsl(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsl((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsl(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsl(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsl(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsl((vector int)__a,
-                                                (vector int)__b);
-}
-
-/* vec_slo */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_slo(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_slo(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_slo(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_slo(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a,
-                                                    vector signed char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_slo(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_slo(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_slo(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
-                                                    vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_slo(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a,
-                                                  vector signed char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_slo(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_slo(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_slo(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a,
-                                                    vector signed char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a,
-                                                    vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_slo(vector signed long long __a, vector signed char __b) {
-  return (vector signed long long)__builtin_altivec_vslo((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_slo(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vslo((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_slo(vector unsigned long long __a, vector signed char __b) {
-  return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a,
-                                                           (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_slo(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a,
-                                                           (vector int)__b);
-}
-#endif
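
vec_slo shifts the full 128-bit value left by whole octets. The underlying vslo
instruction takes the byte count from bits 121:124 of __b, so the count has to
be encoded shifted left by three bits; hedging on that reading of the encoding,
a sketch:

  #include <altivec.h>

  static vector unsigned char slo_demo(vector unsigned char a) {
    vector unsigned char n = (vector unsigned char)(4 << 3); // shift 4 bytes
    return vec_slo(a, n); // vacated byte positions are filled with zeros
  }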
-
-/* vec_vslo */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vslo(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vslo(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vslo((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vslo(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vslo(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vslo((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a,
-                                                     vector signed char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vslo(vector short __a,
-                                                     vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vslo(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vslo(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vslo((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
-                                                     vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vslo(vector pixel __a,
-                                                     vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a,
-                                                   vector signed char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vslo(vector int __a,
-                                                   vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vslo(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vslo(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vslo(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vslo((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a,
-                                                     vector signed char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vslo(vector float __a,
-                                                     vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
-}
-
-/* vec_splat */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_splat(vector signed char __a, unsigned const int __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_splat(vector unsigned char __a, unsigned const int __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_splat(vector bool char __a, unsigned const int __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b & 0x0F));
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_splat(vector signed short __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_splat(vector unsigned short __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_splat(vector bool short __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_splat(vector pixel __a,
-                                                      unsigned const int __b) {
-  unsigned char b0 = (__b & 0x07) * 2;
-  unsigned char b1 = b0 + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b0, b1, b0, b1, b0, b1, b0, b1,
-                                         b0, b1, b0, b1, b0, b1));
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_splat(vector signed int __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_splat(vector unsigned int __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_splat(vector bool int __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_splat(vector float __a,
-                                                      unsigned const int __b) {
-  unsigned char b0 = (__b & 0x03) * 4;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b0, b1, b2, b3, b0, b1,
-                                         b2, b3, b0, b1, b2, b3));
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_splat(vector double __a,
-                                                       unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_splat(vector bool long long __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_splat(vector signed long long __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_splat(vector unsigned long long __a, unsigned const int __b) {
-  unsigned char b0 = (__b & 0x01) * 8;
-  unsigned char b1 = b0 + 1, b2 = b0 + 2, b3 = b0 + 3, b4 = b0 + 4, b5 = b0 + 5,
-                b6 = b0 + 6, b7 = b0 + 7;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(b0, b1, b2, b3, b4, b5, b6, b7, b0, b1,
-                                         b2, b3, b4, b5, b6, b7));
-}
-#endif
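-
-// Illustration (hypothetical helper): vec_splat broadcasts the element
-// selected by __b (masked to the element count, as above) into every lane.
-static __inline__ vector signed int
-__example_splat(vector signed int __v) {
-  return vec_splat(__v, 2); // {a, b, c, d} -> {c, c, c, c}
-}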
-
-/* vec_vspltb */
-
-#define __builtin_altivec_vspltb vec_vspltb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vspltb(vector signed char __a, unsigned char __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vspltb(vector unsigned char __a, unsigned char __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vspltb(vector bool char __a,
-                                                           unsigned char __b) {
-  return vec_perm(__a, __a, (vector unsigned char)(__b));
-}
-
-/* vec_vsplth */
-
-#define __builtin_altivec_vsplth vec_vsplth
-
-static __inline__ vector short __ATTRS_o_ai vec_vsplth(vector short __a,
-                                                       unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsplth(vector unsigned short __a, unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsplth(vector bool short __a, unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsplth(vector pixel __a,
-                                                       unsigned char __b) {
-  __b *= 2;
-  unsigned char b1 = __b + 1;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, __b, b1, __b, b1, __b, b1,
-                                         __b, b1, __b, b1, __b, b1, __b, b1));
-}
-
-/* vec_vspltw */
-
-#define __builtin_altivec_vspltw vec_vspltw
-
-static __inline__ vector int __ATTRS_o_ai vec_vspltw(vector int __a,
-                                                     unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vspltw(vector unsigned int __a, unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vspltw(vector bool int __a,
-                                                          unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vspltw(vector float __a,
-                                                       unsigned char __b) {
-  __b *= 4;
-  unsigned char b1 = __b + 1, b2 = __b + 2, b3 = __b + 3;
-  return vec_perm(__a, __a,
-                  (vector unsigned char)(__b, b1, b2, b3, __b, b1, b2, b3, __b,
-                                         b1, b2, b3, __b, b1, b2, b3));
-}
-
-/* vec_splat_s8 */
-
-#define __builtin_altivec_vspltisb vec_splat_s8
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector signed char __ATTRS_o_ai
-vec_splat_s8(signed char __a) {
-  return (vector signed char)(__a);
-}
-
-/* vec_vspltisb */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vspltisb(signed char __a) {
-  return (vector signed char)(__a);
-}
-
-/* vec_splat_s16 */
-
-#define __builtin_altivec_vspltish vec_splat_s16
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector short __ATTRS_o_ai vec_splat_s16(signed char __a) {
-  return (vector short)(__a);
-}
-
-/* vec_vspltish */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector short __ATTRS_o_ai vec_vspltish(signed char __a) {
-  return (vector short)(__a);
-}
-
-/* vec_splat_s32 */
-
-#define __builtin_altivec_vspltisw vec_splat_s32
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector int __ATTRS_o_ai vec_splat_s32(signed char __a) {
-  return (vector int)(__a);
-}
-
-/* vec_vspltisw */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector int __ATTRS_o_ai vec_vspltisw(signed char __a) {
-  return (vector int)(__a);
-}
-
-/* vec_splat_u8 */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_splat_u8(unsigned char __a) {
-  return (vector unsigned char)(__a);
-}
-
-/* vec_splat_u16 */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_splat_u16(signed char __a) {
-  return (vector unsigned short)(__a);
-}
-
-/* vec_splat_u32 */
-
-// FIXME: parameter should be treated as 5-bit signed literal
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_splat_u32(signed char __a) {
-  return (vector unsigned int)(__a);
-}
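-
-// Illustration (hypothetical helper): the vec_splat_[su]{8,16,32} family
-// lowers to vspltis[bhw], whose immediate is a 5-bit signed literal, so the
-// argument must lie in -16..15 (the reason for the FIXMEs above).
-static __inline__ vector signed char __example_splat_imm(void) {
-  return vec_splat_s8(-7); // sixteen lanes holding -7
-}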
-
-/* vec_sr */
-
-// vec_sr does modulo arithmetic on __b first, so each shift count in __b may
-// exceed the bit width of the corresponding element of __a.
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sr(vector unsigned char __a, vector unsigned char __b) {
-  return __a >>
-         (__b % (vector unsigned char)(sizeof(unsigned char) * __CHAR_BIT__));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sr(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)vec_sr((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sr(vector unsigned short __a, vector unsigned short __b) {
-  return __a >>
-         (__b % (vector unsigned short)(sizeof(unsigned short) * __CHAR_BIT__));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sr(vector short __a,
-                                                   vector unsigned short __b) {
-  return (vector short)vec_sr((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sr(vector unsigned int __a, vector unsigned int __b) {
-  return __a >>
-         (__b % (vector unsigned int)(sizeof(unsigned int) * __CHAR_BIT__));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sr(vector int __a,
-                                                 vector unsigned int __b) {
-  return (vector int)vec_sr((vector unsigned int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a >> (__b % (vector unsigned long long)(sizeof(unsigned long long) *
-                                                   __CHAR_BIT__));
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_sr(vector long long __a, vector unsigned long long __b) {
-  return (vector long long)vec_sr((vector unsigned long long)__a, __b);
-}
-#endif
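-
-// Worked example (hypothetical helper): for byte elements the count is taken
-// mod 8, so shifting by 9 behaves exactly like shifting by 1.
-static __inline__ vector unsigned char
-__example_sr_modulo(vector unsigned char __v) {
-  return vec_sr(__v, vec_splat_u8(9)); // == vec_sr(__v, vec_splat_u8(1))
-}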
-
-/* vec_vsrb */
-
-#define __builtin_altivec_vsrb vec_vsrb
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsrb(vector signed char __a, vector unsigned char __b) {
-  return vec_sr(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsrb(vector unsigned char __a, vector unsigned char __b) {
-  return vec_sr(__a, __b);
-}
-
-/* vec_vsrh */
-
-#define __builtin_altivec_vsrh vec_vsrh
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vsrh(vector short __a, vector unsigned short __b) {
-  return vec_sr(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsrh(vector unsigned short __a, vector unsigned short __b) {
-  return vec_sr(__a, __b);
-}
-
-/* vec_vsrw */
-
-#define __builtin_altivec_vsrw vec_vsrw
-
-static __inline__ vector int __ATTRS_o_ai vec_vsrw(vector int __a,
-                                                   vector unsigned int __b) {
-  return vec_sr(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsrw(vector unsigned int __a, vector unsigned int __b) {
-  return vec_sr(__a, __b);
-}
-
-/* vec_sra */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sra(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sra(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sra(vector short __a,
-                                                    vector unsigned short __b) {
-  return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sra(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sra(vector int __a,
-                                                  vector unsigned int __b) {
-  return __builtin_altivec_vsraw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sra(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sra(vector signed long long __a, vector unsigned long long __b) {
-  return __a >> __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sra(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)((vector signed long long)__a >> __b);
-}
-#endif
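-
-// Illustration (hypothetical helper): vec_sra shifts in copies of the sign
-// bit, vec_sr shifts in zeros. For a lane holding (signed char)-4 (0xFC),
-// shifting right by one gives -2 (0xFE) here but 126 (0x7E) with vec_sr.
-static __inline__ vector signed char
-__example_sra(vector signed char __v) {
-  return vec_sra(__v, vec_splat_u8(1));
-}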
-
-/* vec_vsrab */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsrab(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsrab(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsrab((vector char)__a, __b);
-}
-
-/* vec_vsrah */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vsrah(vector short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsrah(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsrah(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsrah((vector short)__a, __b);
-}
-
-/* vec_vsraw */
-
-static __inline__ vector int __ATTRS_o_ai vec_vsraw(vector int __a,
-                                                    vector unsigned int __b) {
-  return __builtin_altivec_vsraw(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsraw(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsraw((vector int)__a, __b);
-}
-
-/* vec_srl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_srl(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_srl(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_srl(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srl(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srl(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_srl(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_srl(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_srl(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_srl(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_srl(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_srl(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_srl(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_srl(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_srl(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_srl(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_srl(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_srl(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_srl(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_srl(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_srl(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_srl(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_srl(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_srl(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_srl(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_srl(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vsr((vector int)__a,
-                                                        (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_srl(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsr((vector int)__a,
-                                                          (vector int)__b);
-}
-#endif
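-
-// Illustration (hypothetical helper): vec_srl moves the entire 128-bit
-// register right by 0..7 bits; the count comes from the low three bits of
-// __b, which the ISA requires to be identical in every byte of __b (hence
-// the splat), otherwise the result is undefined.
-static __inline__ vector unsigned char
-__example_srl(vector unsigned char __v) {
-  return vec_srl(__v, vec_splat_u8(3));
-}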
-
-/* vec_vsr */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsr(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsr(vector signed char __a, vector unsigned short __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsr(vector signed char __a, vector unsigned int __b) {
-  return (vector signed char)__builtin_altivec_vsr((vector int)__a,
-                                                   (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsr(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsr(vector unsigned char __a, vector unsigned short __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsr(vector unsigned char __a, vector unsigned int __b) {
-  return (vector unsigned char)__builtin_altivec_vsr((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsr(vector bool char __a, vector unsigned char __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsr(vector bool char __a, vector unsigned short __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsr(vector bool char __a, vector unsigned int __b) {
-  return (vector bool char)__builtin_altivec_vsr((vector int)__a,
-                                                 (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
-                                                    vector unsigned short __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsr(vector short __a,
-                                                    vector unsigned int __b) {
-  return (vector short)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsr(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsr(vector unsigned short __a, vector unsigned short __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsr(vector unsigned short __a, vector unsigned int __b) {
-  return (vector unsigned short)__builtin_altivec_vsr((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsr(vector bool short __a, vector unsigned char __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsr(vector bool short __a, vector unsigned short __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsr(vector bool short __a, vector unsigned int __b) {
-  return (vector bool short)__builtin_altivec_vsr((vector int)__a,
-                                                  (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
-                                                    vector unsigned short __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsr(vector pixel __a,
-                                                    vector unsigned int __b) {
-  return (vector pixel)__builtin_altivec_vsr((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
-                                                  vector unsigned short __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsr(vector int __a,
-                                                  vector unsigned int __b) {
-  return (vector int)__builtin_altivec_vsr(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsr(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsr(vector unsigned int __a, vector unsigned short __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsr(vector unsigned int __a, vector unsigned int __b) {
-  return (vector unsigned int)__builtin_altivec_vsr((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsr(vector bool int __a, vector unsigned char __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsr(vector bool int __a, vector unsigned short __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsr(vector bool int __a, vector unsigned int __b) {
-  return (vector bool int)__builtin_altivec_vsr((vector int)__a,
-                                                (vector int)__b);
-}
-
-/* vec_sro */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sro(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sro(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sro(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sro(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a,
-                                                    vector signed char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sro(vector short __a,
-                                                    vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sro(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sro(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
-                                                    vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_sro(vector pixel __a,
-                                                    vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a,
-                                                  vector signed char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sro(vector int __a,
-                                                  vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sro(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sro(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a,
-                                                    vector signed char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a,
-                                                    vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sro(vector signed long long __a, vector signed char __b) {
-  return (vector signed long long)__builtin_altivec_vsro((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sro(vector signed long long __a, vector unsigned char __b) {
-  return (vector signed long long)__builtin_altivec_vsro((vector int)__a,
-                                                         (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sro(vector unsigned long long __a, vector signed char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a,
-                                                           (vector int)__b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sro(vector unsigned long long __a, vector unsigned char __b) {
-  return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a,
-                                                           (vector int)__b);
-}
-#endif
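-
-// Illustration (hypothetical helper): vec_sro is the octet counterpart of
-// vec_srl; the byte count sits in bits 121:124 of __b, i.e. the count
-// pre-multiplied by 8 (register-level movement; the lane order implied
-// below assumes big-endian layout).
-static __inline__ vector unsigned char
-__example_sro(vector unsigned char __v) {
-  return vec_sro(__v, vec_splat_u8(1 << 3)); // right by one octet
-}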
-
-/* vec_vsro */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsro(vector signed char __a, vector signed char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsro(vector signed char __a, vector unsigned char __b) {
-  return (vector signed char)__builtin_altivec_vsro((vector int)__a,
-                                                    (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsro(vector unsigned char __a, vector signed char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsro(vector unsigned char __a, vector unsigned char __b) {
-  return (vector unsigned char)__builtin_altivec_vsro((vector int)__a,
-                                                      (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a,
-                                                     vector signed char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsro(vector short __a,
-                                                     vector unsigned char __b) {
-  return (vector short)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsro(vector unsigned short __a, vector signed char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsro(vector unsigned short __a, vector unsigned char __b) {
-  return (vector unsigned short)__builtin_altivec_vsro((vector int)__a,
-                                                       (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
-                                                     vector signed char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_vsro(vector pixel __a,
-                                                     vector unsigned char __b) {
-  return (vector pixel)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a,
-                                                   vector signed char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsro(vector int __a,
-                                                   vector unsigned char __b) {
-  return (vector int)__builtin_altivec_vsro(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsro(vector unsigned int __a, vector signed char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsro(vector unsigned int __a, vector unsigned char __b) {
-  return (vector unsigned int)__builtin_altivec_vsro((vector int)__a,
-                                                     (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a,
-                                                     vector signed char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a,
-                                                     vector unsigned char __b) {
-  return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
-}
-
-/* vec_st */
-
-static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
-                                           vector signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
-                                           signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
-                                           vector unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
-                                           unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
-                                           signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
-                                           unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
-                                           vector bool char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector short __a, int __b,
-                                           vector short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector short __a, int __b,
-                                           short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
-                                           vector unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
-                                           unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
-                                           short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
-                                           unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
-                                           vector bool short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
-                                           short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
-                                           unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
-                                           vector pixel *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector int __a, int __b,
-                                           vector int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector int __a, int __b, int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
-                                           vector unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
-                                           unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
-                                           int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
-                                           unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
-                                           vector bool int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector float __a, int __b,
-                                           vector float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_st(vector float __a, int __b,
-                                           float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
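-
-// Illustration (hypothetical helper): stvx forms the effective address as
-// __b + __c and clears its low four bits, so vec_st always stores to a
-// 16-byte-aligned location; callers are expected to pass aligned pointers.
-static __inline__ void __example_st(vector int __v, int *__p) {
-  vec_st(__v, 0, __p); // stores all 16 bytes at the aligned address __p
-}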
-
-/* vec_stvx */
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
-                                             vector signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
-                                             signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
-                                             vector unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
-                                             unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
-                                             signed char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
-                                             unsigned char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
-                                             vector bool char *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, int __b,
-                                             vector short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, int __b,
-                                             short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
-                                             vector unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
-                                             unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
-                                             short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
-                                             unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
-                                             vector bool short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
-                                             short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
-                                             unsigned short *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
-                                             vector pixel *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, int __b,
-                                             vector int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, int __b,
-                                             int *__c) {
-  __builtin_altivec_stvx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
-                                             vector unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
-                                             unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
-                                             int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
-                                             unsigned int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
-                                             vector bool int *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, int __b,
-                                             vector float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, int __b,
-                                             float *__c) {
-  __builtin_altivec_stvx((vector int)__a, __b, __c);
-}
-
-/* vec_ste */
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector signed char __a, int __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned char __a, int __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector short __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvehx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned short __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector int __a, int __b, int *__c) {
-  __builtin_altivec_stvewx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned int __a, int __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, int __b,
-                                            int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, int __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_ste(vector float __a, int __b,
-                                            float *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
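-
-// Illustration (hypothetical helper): vec_ste stores a single element; the
-// lane written is the one the (element-aligned) effective address selects
-// within its 16-byte block, so only 4 bytes reach memory here.
-static __inline__ void __example_ste(vector int __v, int *__p) {
-  vec_ste(__v, 0, __p);
-}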
-
-/* vec_stvebx */
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector signed char __a, int __b,
-                                               signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector unsigned char __a,
-                                               int __b, unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
-                                               signed char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
-                                               unsigned char *__c) {
-  __builtin_altivec_stvebx((vector char)__a, __b, __c);
-}
-
-/* vec_stvehx */
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector short __a, int __b,
-                                               short *__c) {
-  __builtin_altivec_stvehx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector unsigned short __a,
-                                               int __b, unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
-                                               short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
-                                               unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b,
-                                               short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b,
-                                               unsigned short *__c) {
-  __builtin_altivec_stvehx((vector short)__a, __b, __c);
-}
-
-/* vec_stvewx */
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector int __a, int __b,
-                                               int *__c) {
-  __builtin_altivec_stvewx(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector unsigned int __a, int __b,
-                                               unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b,
-                                               int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b,
-                                               unsigned int *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector float __a, int __b,
-                                               float *__c) {
-  __builtin_altivec_stvewx((vector int)__a, __b, __c);
-}
-
-/* vec_stl */
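-
-// stvxl behaves like stvx, with the same addressing and alignment as vec_st,
-// but marks the touched cache line "least recently used" so streaming stores
-// do not displace hot data (a performance hint only, per our reading of the
-// ISA description).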
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
-                                            vector signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector signed char __a, int __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
-                                            vector unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned char __a, int __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
-                                            signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
-                                            unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool char __a, int __b,
-                                            vector bool char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b,
-                                            vector short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector short __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
-                                            vector unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned short __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool short __a, int __b,
-                                            vector bool short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
-                                            short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
-                                            unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector pixel __a, int __b,
-                                            vector pixel *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b,
-                                            vector int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector int __a, int __b, int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
-                                            vector unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector unsigned int __a, int __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
-                                            int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
-                                            unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector bool int __a, int __b,
-                                            vector bool int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b,
-                                            vector float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stl(vector float __a, int __b,
-                                            float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-/* vec_stvxl */
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
-                                              vector signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector signed char __a, int __b,
-                                              signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
-                                              vector unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned char __a, int __b,
-                                              unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
-                                              signed char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
-                                              unsigned char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool char __a, int __b,
-                                              vector bool char *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b,
-                                              vector short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector short __a, int __b,
-                                              short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a,
-                                              int __b,
-                                              vector unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned short __a,
-                                              int __b, unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
-                                              short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
-                                              unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool short __a, int __b,
-                                              vector bool short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
-                                              short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
-                                              unsigned short *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector pixel __a, int __b,
-                                              vector pixel *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b,
-                                              vector int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector int __a, int __b,
-                                              int *__c) {
-  __builtin_altivec_stvxl(__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
-                                              vector unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector unsigned int __a, int __b,
-                                              unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
-                                              int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
-                                              unsigned int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector bool int __a, int __b,
-                                              vector bool int *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b,
-                                              vector float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvxl(vector float __a, int __b,
-                                              float *__c) {
-  __builtin_altivec_stvxl((vector int)__a, __b, __c);
-}
-
-/* vec_sub */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sub(vector signed char __a, vector signed char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sub(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_sub(vector signed char __a, vector bool char __b) {
-  return __a - (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sub(vector unsigned char __a, vector unsigned char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sub(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_sub(vector unsigned char __a, vector bool char __b) {
-  return __a - (vector unsigned char)__b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a,
-                                                    vector short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sub(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_sub(vector short __a,
-                                                    vector bool short __b) {
-  return __a - (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sub(vector unsigned short __a, vector unsigned short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sub(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_sub(vector unsigned short __a, vector bool short __b) {
-  return __a - (vector unsigned short)__b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a,
-                                                  vector int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sub(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sub(vector int __a,
-                                                  vector bool int __b) {
-  return __a - (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sub(vector unsigned int __a, vector unsigned int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sub(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sub(vector unsigned int __a, vector bool int __b) {
-  return __a - (vector unsigned int)__b;
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_sub(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_sub(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a - __b;
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_sub(vector signed long long __a, vector signed long long __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_sub(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a - __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_sub(vector double __a,
-                                                     vector double __b) {
-  return __a - __b;
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_sub(vector float __a,
-                                                    vector float __b) {
-  return __a - __b;
-}
-
-/* vec_vsububm */
-
-#define __builtin_altivec_vsububm vec_vsububm
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsububm(vector signed char __a, vector signed char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsububm(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a - __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsububm(vector signed char __a, vector bool char __b) {
-  return __a - (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububm(vector unsigned char __a, vector unsigned char __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububm(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a - __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububm(vector unsigned char __a, vector bool char __b) {
-  return __a - (vector unsigned char)__b;
-}
-
-/* vec_vsubuhm */
-
-#define __builtin_altivec_vsubuhm vec_vsubuhm
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
-                                                        vector short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector bool short __a,
-                                                        vector short __b) {
-  return (vector short)__a - __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubuhm(vector short __a,
-                                                        vector bool short __b) {
-  return __a - (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhm(vector unsigned short __a, vector unsigned short __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhm(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a - __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhm(vector unsigned short __a, vector bool short __b) {
-  return __a - (vector unsigned short)__b;
-}
-
-/* vec_vsubuwm */
-
-#define __builtin_altivec_vsubuwm vec_vsubuwm
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a,
-                                                      vector int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector bool int __a,
-                                                      vector int __b) {
-  return (vector int)__a - __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubuwm(vector int __a,
-                                                      vector bool int __b) {
-  return __a - (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuwm(vector unsigned int __a, vector unsigned int __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuwm(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a - __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuwm(vector unsigned int __a, vector bool int __b) {
-  return __a - (vector unsigned int)__b;
-}
-
-/* vec_vsubfp */
-
-#define __builtin_altivec_vsubfp vec_vsubfp
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vsubfp(vector float __a, vector float __b) {
-  return __a - __b;
-}
-
-/* vec_subc */
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_subc(vector signed int __a, vector signed int __b) {
-  return (vector signed int)__builtin_altivec_vsubcuw((vector unsigned int)__a,
-                                                      (vector unsigned int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subc(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubcuw(__a, __b);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_subc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_subc(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-/* vec_vsubcuw */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vsubcuw(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubcuw(__a, __b);
-}
-
-/* vec_subs */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_subs(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_subs(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_subs(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_subs(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_subs(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_subs(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vsubshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_subs(vector bool short __a,
-                                                     vector short __b) {
-  return __builtin_altivec_vsubshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_subs(vector short __a,
-                                                     vector bool short __b) {
-  return __builtin_altivec_vsubshs(__a, (vector short)__b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_subs(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_subs(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_subs(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vsubsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_subs(vector bool int __a,
-                                                   vector int __b) {
-  return __builtin_altivec_vsubsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_subs(vector int __a,
-                                                   vector bool int __b) {
-  return __builtin_altivec_vsubsws(__a, (vector int)__b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subs(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subs(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subs(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
-}
-
-/* vec_vsubsbs */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsubsbs(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsubsbs(vector bool char __a, vector signed char __b) {
-  return __builtin_altivec_vsubsbs((vector signed char)__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsubsbs(vector signed char __a, vector bool char __b) {
-  return __builtin_altivec_vsubsbs(__a, (vector signed char)__b);
-}
-
-/* vec_vsububs */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububs(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububs(vector bool char __a, vector unsigned char __b) {
-  return __builtin_altivec_vsububs((vector unsigned char)__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsububs(vector unsigned char __a, vector bool char __b) {
-  return __builtin_altivec_vsububs(__a, (vector unsigned char)__b);
-}
-
-/* vec_vsubshs */
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vsubshs(__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector bool short __a,
-                                                        vector short __b) {
-  return __builtin_altivec_vsubshs((vector short)__a, __b);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vsubshs(vector short __a,
-                                                        vector bool short __b) {
-  return __builtin_altivec_vsubshs(__a, (vector short)__b);
-}
-
-/* vec_vsubuhs */
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhs(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhs(vector bool short __a, vector unsigned short __b) {
-  return __builtin_altivec_vsubuhs((vector unsigned short)__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsubuhs(vector unsigned short __a, vector bool short __b) {
-  return __builtin_altivec_vsubuhs(__a, (vector unsigned short)__b);
-}
-
-/* vec_vsubsws */
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vsubsws(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector bool int __a,
-                                                      vector int __b) {
-  return __builtin_altivec_vsubsws((vector int)__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vsubsws(vector int __a,
-                                                      vector bool int __b) {
-  return __builtin_altivec_vsubsws(__a, (vector int)__b);
-}
-
-/* vec_vsubuws */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuws(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuws(vector bool int __a, vector unsigned int __b) {
-  return __builtin_altivec_vsubuws((vector unsigned int)__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsubuws(vector unsigned int __a, vector bool int __b) {
-  return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-/* vec_vsubuqm */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubuqm(vector signed __int128 __a, vector signed __int128 __b) {
-  return __a - __b;
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubuqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __a - __b;
-}
-
-/* vec_vsubeuqm */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_sube(vector signed __int128 __a, vector signed __int128 __b,
-         vector signed __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_sube(vector unsigned __int128 __a, vector unsigned __int128 __b,
-         vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubeuqm(__a, __b, __c);
-}
-
-/* vec_vsubcuq */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
-  return __builtin_altivec_vsubcuq(__a, __b);
-}
-
-/* vec_vsubecuq */
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_vsubecuq(vector signed __int128 __a, vector signed __int128 __b,
-             vector signed __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_vsubecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
-             vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_subec(vector signed int __a, vector signed int __b,
-          vector signed int __c) {
-  return vec_addec(__a, ~__b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_subec(vector unsigned int __a, vector unsigned int __b,
-          vector unsigned int __c) {
-  return vec_addec(__a, ~__b, __c);
-}
-
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_subec(vector signed __int128 __a, vector signed __int128 __b,
-          vector signed __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_subec(vector unsigned __int128 __a, vector unsigned __int128 __b,
-          vector unsigned __int128 __c) {
-  return __builtin_altivec_vsubecuq(__a, __b, __c);
-}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_sube(vector signed int __a, vector signed int __b,
-         vector signed int __c) {
-  vector signed int __mask = {1, 1, 1, 1};
-  vector signed int __carry = __c & __mask;
-  return vec_adde(__a, ~__b, __carry);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sube(vector unsigned int __a, vector unsigned int __b,
-         vector unsigned int __c) {
-  vector unsigned int __mask = {1, 1, 1, 1};
-  vector unsigned int __carry = __c & __mask;
-  return vec_adde(__a, ~__b, __carry);
-}
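-
-/* Illustrative sketch, not part of the original header: subtraction
-   with a carry-in is addition of the one's complement, so with a
-   carry-in of 1 ("no borrow") vec_sube performs a plain subtract:
-
-     vector unsigned int __va = {5, 5, 5, 5};
-     vector unsigned int __vb = {3, 3, 3, 3};
-     vector unsigned int __vc = {1, 1, 1, 1};
-     vector unsigned int __vr = vec_sube(__va, __vb, __vc);
-     // Each element: 5 + ~3 + 1 == 5 - 3 == 2 (mod 2^32).
-*/
-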
-/* vec_sum4s */
-
-static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed char __a,
-                                                    vector int __b) {
-  return __builtin_altivec_vsum4sbs(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_sum4s(vector unsigned char __a, vector unsigned int __b) {
-  return __builtin_altivec_vsum4ubs(__a, __b);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed short __a,
-                                                    vector int __b) {
-  return __builtin_altivec_vsum4shs(__a, __b);
-}
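-
-/* Illustrative sketch, not part of the original header: vec_sum4s adds
-   each aligned group of sub-elements into the matching word of __b,
-   saturating on overflow:
-
-     vector signed char __va = {1, 1, 1, 1, 2, 2, 2, 2,
-                                3, 3, 3, 3, 4, 4, 4, 4};
-     vector int __vb = {10, 20, 30, 40};
-     vector int __vr = vec_sum4s(__va, __vb);
-     // __vr == {14, 28, 42, 56}
-*/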
-
-/* vec_vsum4sbs */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vsum4sbs(vector signed char __a, vector int __b) {
-  return __builtin_altivec_vsum4sbs(__a, __b);
-}
-
-/* vec_vsum4ubs */
-
-static __inline__ vector unsigned int __attribute__((__always_inline__))
-vec_vsum4ubs(vector unsigned char __a, vector unsigned int __b) {
-  return __builtin_altivec_vsum4ubs(__a, __b);
-}
-
-/* vec_vsum4shs */
-
-static __inline__ vector int __attribute__((__always_inline__))
-vec_vsum4shs(vector signed short __a, vector int __b) {
-  return __builtin_altivec_vsum4shs(__a, __b);
-}
-
-/* vec_sum2s */
-
-/* The vsum2sws instruction has a big-endian bias, so that the second
-   input vector and the result always reference big-endian elements
-   1 and 3 (little-endian elements 0 and 2).  For ease of porting,
-   the programmer wants elements 1 and 3 in both cases, so for little
-   endian we must perform some permutes.  */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_sum2s(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  vector int __c = (vector signed int)vec_perm(
-      __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-  __c = __builtin_altivec_vsum2sws(__a, __c);
-  return (vector signed int)vec_perm(
-      __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-#else
-  return __builtin_altivec_vsum2sws(__a, __b);
-#endif
-}
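-
-/* Illustrative sketch, not part of the original header: with the
-   little-endian permutes above, the cross sums land in elements 1 and
-   3 on either endianness:
-
-     vector int __va = {1, 2, 3, 4};
-     vector int __vb = {0, 10, 0, 20};
-     vector int __vr = vec_sum2s(__va, __vb);
-     // __vr == {0, 1 + 2 + 10, 0, 3 + 4 + 20} == {0, 13, 0, 27}
-*/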
-
-/* vec_vsum2sws */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_vsum2sws(vector int __a, vector int __b) {
-#ifdef __LITTLE_ENDIAN__
-  vector int __c = (vector signed int)vec_perm(
-      __b, __b, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-  __c = __builtin_altivec_vsum2sws(__a, __c);
-  return (vector signed int)vec_perm(
-      __c, __c, (vector unsigned char)(4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15,
-                                       8, 9, 10, 11));
-#else
-  return __builtin_altivec_vsum2sws(__a, __b);
-#endif
-}
-
-/* vec_sums */
-
-/* The vsumsws instruction has a big-endian bias, so that the second
-   input vector and the result always reference big-endian element 3
-   (little-endian element 0).  For ease of porting the programmer
-   wants element 3 in both cases, so for little endian we must perform
-   some permutes.  */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_sums(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  __b = (vector signed int)vec_splat(__b, 3);
-  __b = __builtin_altivec_vsumsws(__a, __b);
-  return (vector signed int)(0, 0, 0, __b[0]);
-#else
-  return __builtin_altivec_vsumsws(__a, __b);
-#endif
-}
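-
-/* Illustrative sketch, not part of the original header: vec_sums
-   leaves the saturated total in element 3 on either endianness:
-
-     vector signed int __va = {1, 2, 3, 4};
-     vector signed int __vb = {0, 0, 0, 100};
-     vector signed int __vr = vec_sums(__va, __vb);
-     // __vr == {0, 0, 0, 1 + 2 + 3 + 4 + 100} == {0, 0, 0, 110}
-*/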
-
-/* vec_vsumsws */
-
-static __inline__ vector signed int __attribute__((__always_inline__))
-vec_vsumsws(vector signed int __a, vector signed int __b) {
-#ifdef __LITTLE_ENDIAN__
-  __b = (vector signed int)vec_splat(__b, 3);
-  __b = __builtin_altivec_vsumsws(__a, __b);
-  return (vector signed int)(0, 0, 0, __b[0]);
-#else
-  return __builtin_altivec_vsumsws(__a, __b);
-#endif
-}
-
-/* vec_trunc */
-
-static __inline__ vector float __ATTRS_o_ai vec_trunc(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvrspiz(__a);
-#else
-  return __builtin_altivec_vrfiz(__a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ vector double __ATTRS_o_ai vec_trunc(vector double __a) {
-  return __builtin_vsx_xvrdpiz(__a);
-}
-#endif
-
-/* vec_vrfiz */
-
-static __inline__ vector float __attribute__((__always_inline__))
-vec_vrfiz(vector float __a) {
-  return __builtin_altivec_vrfiz(__a);
-}
-
-/* vec_unpackh */
-
-/* The vector unpack instructions all have a big-endian bias, so for
-   little endian we must reverse the meanings of "high" and "low."  */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_unpackh(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsb((vector char)__a);
-#else
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
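-
-/* Illustrative sketch, not part of the original header: thanks to the
-   instruction swap above, vec_unpackh always widens elements 0..7 as
-   the program indexes them:
-
-     vector signed char __va = {0, 1, 2, 3, 4, 5, 6, 7,
-                                8, 9, 10, 11, 12, 13, 14, 15};
-     vector short __vr = vec_unpackh(__va);
-     // __vr == {0, 1, 2, 3, 4, 5, 6, 7} on both endiannesses.
-*/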
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_unpackh(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_unpackh(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsh(__a);
-#else
-  return __builtin_altivec_vupkhsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_unpackh(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unpackh(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_unpackh(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsw(__a);
-#else
-  return __builtin_altivec_vupkhsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_unpackh(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_unpackh(vector float __a) {
-  return (vector double)(__a[0], __a[1]);
-}
-#endif
-
-/* vec_vupkhsb */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vupkhsb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsb((vector char)__a);
-#else
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vupkhsb(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#endif
-}
-
-/* vec_vupkhsh */
-
-static __inline__ vector int __ATTRS_o_ai vec_vupkhsh(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsh(__a);
-#else
-  return __builtin_altivec_vupkhsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vupkhsh(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vupkhsh(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#endif
-}
-
-/* vec_vupkhsw */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_vupkhsw(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupklsw(__a);
-#else
-  return __builtin_altivec_vupkhsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vupkhsw(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#endif
-}
-#endif
-
-/* vec_unpackl */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_unpackl(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return __builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_unpackl(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_unpackl(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsh(__a);
-#else
-  return __builtin_altivec_vupklsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_unpackl(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_unpackl(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#endif
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_unpackl(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsw(__a);
-#else
-  return __builtin_altivec_vupklsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_unpackl(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#endif
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_unpackl(vector float __a) {
-  return (vector double)(__a[2], __a[3]);
-}
-#endif
-
-/* vec_vupklsb */
-
-static __inline__ vector short __ATTRS_o_ai
-vec_vupklsb(vector signed char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return __builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vupklsb(vector bool char __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool short)__builtin_altivec_vupkhsb((vector char)__a);
-#else
-  return (vector bool short)__builtin_altivec_vupklsb((vector char)__a);
-#endif
-}
-
-/* vec_vupklsh */
-
-static __inline__ vector int __ATTRS_o_ai vec_vupklsh(vector short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsh(__a);
-#else
-  return __builtin_altivec_vupklsh(__a);
-#endif
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vupklsh(vector bool short __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool int)__builtin_altivec_vupkhsh((vector short)__a);
-#else
-  return (vector bool int)__builtin_altivec_vupklsh((vector short)__a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vupklsh(vector pixel __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector unsigned int)__builtin_altivec_vupkhpx((vector short)__a);
-#else
-  return (vector unsigned int)__builtin_altivec_vupklpx((vector short)__a);
-#endif
-}
-
-/* vec_vupklsw */
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector long long __ATTRS_o_ai vec_vupklsw(vector int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vupkhsw(__a);
-#else
-  return __builtin_altivec_vupklsw(__a);
-#endif
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vupklsw(vector bool int __a) {
-#ifdef __LITTLE_ENDIAN__
-  return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
-#else
-  return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
-#endif
-}
-#endif
-
-/* vec_vsx_ld */
-
-#ifdef __VSX__
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector bool int *__b) {
-  return (vector bool int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed int *__b) {
-  return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_vsx_ld(int __a, const signed int *__b) {
-  return (vector signed int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned int *__b) {
-  return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vsx_ld(int __a, const unsigned int *__b) {
-  return (vector unsigned int)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector float *__b) {
-  return (vector float)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vsx_ld(int __a,
-                                                       const float *__b) {
-  return (vector float)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed long long *__b) {
-  return (vector signed long long)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned long long *__b) {
-  return (vector unsigned long long)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector double *__b) {
-  return (vector double)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_vsx_ld(int __a, const double *__b) {
-  return (vector double)__builtin_vsx_lxvd2x(__a, __b);
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector bool short *__b) {
-  return (vector bool short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed short *__b) {
-  return (vector signed short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_vsx_ld(int __a, const signed short *__b) {
-  return (vector signed short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned short *__b) {
-  return (vector unsigned short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vsx_ld(int __a, const unsigned short *__b) {
-  return (vector unsigned short)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector bool char *__b) {
-  return (vector bool char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector signed char *__b) {
-  return (vector signed char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vsx_ld(int __a, const signed char *__b) {
-  return (vector signed char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsx_ld(int __a, const vector unsigned char *__b) {
-  return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vsx_ld(int __a, const unsigned char *__b) {
-  return (vector unsigned char)__builtin_vsx_lxvw4x(__a, __b);
-}
-
-#endif
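-
-/* Illustrative sketch, not part of the original header: unlike vec_ld,
-   which masks the effective address down to a 16-byte boundary,
-   vec_vsx_ld can load from an unaligned address:
-
-     float __buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
-     vector float __vf = vec_vsx_ld(4, __buf);
-     // Loads __buf[1] through __buf[4].
-*/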
-
-/* vec_vsx_st */
-
-#ifdef __VSX__
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
-                                               vector bool int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
-                                               signed int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool int __a, int __b,
-                                               unsigned int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
-                                               vector signed int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed int __a, int __b,
-                                               signed int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b,
-                                               vector unsigned int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned int __a, int __b,
-                                               unsigned int *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b,
-                                               vector float *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector float __a, int __b,
-                                               float *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed long long __a,
-                                               int __b,
-                                               vector signed long long *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned long long __a,
-                                               int __b,
-                                               vector unsigned long long *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
-                                               vector double *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector double __a, int __b,
-                                               double *__c) {
-  __builtin_vsx_stxvd2x((vector double)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
-                                               vector bool short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
-                                               signed short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool short __a, int __b,
-                                               unsigned short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b,
-                                               vector signed short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed short __a, int __b,
-                                               signed short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned short __a,
-                                               int __b,
-                                               vector unsigned short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned short __a,
-                                               int __b, unsigned short *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
-                                               vector bool char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
-                                               signed char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector bool char __a, int __b,
-                                               unsigned char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
-                                               vector signed char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector signed char __a, int __b,
-                                               signed char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
-                                               int __b,
-                                               vector unsigned char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
-                                               int __b, unsigned char *__c) {
-  __builtin_vsx_stxvw4x((vector int)__a, __b, __c);
-}
-
-#endif
-
-#ifdef __VSX__
-#define vec_xxpermdi __builtin_vsx_xxpermdi
-#define vec_xxsldwi __builtin_vsx_xxsldwi
-#endif
-
-/* vec_xor */
-
-#define __builtin_altivec_vxor vec_xor
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xor(vector signed char __a, vector signed char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xor(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xor(vector signed char __a, vector bool char __b) {
-  return __a ^ (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector unsigned char __a, vector unsigned char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xor(vector unsigned char __a, vector bool char __b) {
-  return __a ^ (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_xor(vector bool char __a,
-                                                        vector bool char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a,
-                                                    vector short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_xor(vector bool short __a,
-                                                    vector short __b) {
-  return (vector short)__a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_xor(vector short __a,
-                                                    vector bool short __b) {
-  return __a ^ (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xor(vector unsigned short __a, vector unsigned short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xor(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xor(vector unsigned short __a, vector bool short __b) {
-  return __a ^ (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_xor(vector bool short __a, vector bool short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a,
-                                                  vector int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_xor(vector bool int __a,
-                                                  vector int __b) {
-  return (vector int)__a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_xor(vector int __a,
-                                                  vector bool int __b) {
-  return __a ^ (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xor(vector unsigned int __a, vector unsigned int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xor(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xor(vector unsigned int __a, vector bool int __b) {
-  return __a ^ (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_xor(vector bool int __a,
-                                                       vector bool int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xor(vector bool int __a,
-                                                    vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_xor(vector float __a,
-                                                    vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
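-
-/* Illustrative sketch, not part of the original header: the float
-   overloads XOR the raw bits, so XORing with the sign-bit mask negates
-   every element:
-
-     vector float __va = {1.0f, -2.0f, 3.0f, -4.0f};
-     vector unsigned int __vsign = {0x80000000, 0x80000000,
-                                    0x80000000, 0x80000000};
-     vector float __vr = vec_xor(__va, (vector float)__vsign);
-     // __vr == {-1.0f, 2.0f, -3.0f, 4.0f}
-*/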
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xor(vector signed long long __a, vector signed long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xor(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xor(vector signed long long __a, vector bool long long __b) {
-  return __a ^ (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xor(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xor(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xor(vector unsigned long long __a, vector bool long long __b) {
-  return __a ^ (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_xor(vector bool long long __a, vector bool long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_xor(vector double __a,
-                                                     vector double __b) {
-  return (vector double)((vector unsigned long long)__a ^
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_xor(vector double __a, vector bool long long __b) {
-  return (vector double)((vector unsigned long long)__a ^
-                         (vector unsigned long long)__b);
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_xor(vector bool long long __a,
-                                                     vector double __b) {
-  return (vector double)((vector unsigned long long)__a ^
-                         (vector unsigned long long)__b);
-}
-#endif
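-
-/* Usage sketch (editorial, not part of the upstream header): the vector
-   float overloads of vec_xor operate on the raw bit pattern, so XORing with
-   a splat of the IEEE sign-bit mask negates every lane.  Names below are
-   illustrative only.
-
-     vector float __v = vec_splats(1.5f);
-     vector unsigned int __mask = vec_splats(0x80000000u);
-     vector float __neg = vec_xor(__v, (vector float)__mask);  // lanes == -1.5f
-*/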
-
-/* vec_vxor */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vxor(vector signed char __a, vector signed char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vxor(vector bool char __a, vector signed char __b) {
-  return (vector signed char)__a ^ __b;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vxor(vector signed char __a, vector bool char __b) {
-  return __a ^ (vector signed char)__b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vxor(vector unsigned char __a, vector unsigned char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vxor(vector bool char __a, vector unsigned char __b) {
-  return (vector unsigned char)__a ^ __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vxor(vector unsigned char __a, vector bool char __b) {
-  return __a ^ (vector unsigned char)__b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_vxor(vector bool char __a,
-                                                         vector bool char __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a,
-                                                     vector short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vxor(vector bool short __a,
-                                                     vector short __b) {
-  return (vector short)__a ^ __b;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_vxor(vector short __a,
-                                                     vector bool short __b) {
-  return __a ^ (vector short)__b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vxor(vector unsigned short __a, vector unsigned short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vxor(vector bool short __a, vector unsigned short __b) {
-  return (vector unsigned short)__a ^ __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_vxor(vector unsigned short __a, vector bool short __b) {
-  return __a ^ (vector unsigned short)__b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_vxor(vector bool short __a, vector bool short __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a,
-                                                   vector int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vxor(vector bool int __a,
-                                                   vector int __b) {
-  return (vector int)__a ^ __b;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_vxor(vector int __a,
-                                                   vector bool int __b) {
-  return __a ^ (vector int)__b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vxor(vector unsigned int __a, vector unsigned int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vxor(vector bool int __a, vector unsigned int __b) {
-  return (vector unsigned int)__a ^ __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_vxor(vector unsigned int __a, vector bool int __b) {
-  return __a ^ (vector unsigned int)__b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_vxor(vector bool int __a,
-                                                        vector bool int __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vxor(vector bool int __a,
-                                                     vector float __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_vxor(vector float __a,
-                                                     vector bool int __b) {
-  vector unsigned int __res =
-      (vector unsigned int)__a ^ (vector unsigned int)__b;
-  return (vector float)__res;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vxor(vector signed long long __a, vector signed long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vxor(vector bool long long __a, vector signed long long __b) {
-  return (vector signed long long)__a ^ __b;
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_vxor(vector signed long long __a, vector bool long long __b) {
-  return __a ^ (vector signed long long)__b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vxor(vector unsigned long long __a, vector unsigned long long __b) {
-  return __a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vxor(vector bool long long __a, vector unsigned long long __b) {
-  return (vector unsigned long long)__a ^ __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_vxor(vector unsigned long long __a, vector bool long long __b) {
-  return __a ^ (vector unsigned long long)__b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_vxor(vector bool long long __a, vector bool long long __b) {
-  return __a ^ __b;
-}
-#endif
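-
-/* Usage sketch (editorial): vec_vxor is the form that maps directly onto the
-   AltiVec vxor instruction and accepts the same operand combinations as
-   vec_xor above, so the two are interchangeable for the types defined here.
-
-     vector int __x = vec_splats(0x0F);
-     vector int __y = vec_vxor(__x, __x);   // XOR with itself: all lanes 0
-*/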
-
-/* ------------------------ extensions for CBEA ----------------------------- */
-
-/* vec_extract */
-
-static __inline__ signed char __ATTRS_o_ai vec_extract(vector signed char __a,
-                                                       int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned char __ATTRS_o_ai
-vec_extract(vector unsigned char __a, int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned char __ATTRS_o_ai vec_extract(vector bool char __a,
-                                                         int __b) {
-  return __a[__b];
-}
-
-static __inline__ signed short __ATTRS_o_ai vec_extract(vector signed short __a,
-                                                        int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned short __ATTRS_o_ai
-vec_extract(vector unsigned short __a, int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned short __ATTRS_o_ai vec_extract(vector bool short __a,
-                                                          int __b) {
-  return __a[__b];
-}
-
-static __inline__ signed int __ATTRS_o_ai vec_extract(vector signed int __a,
-                                                      int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector unsigned int __a,
-                                                        int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector bool int __a,
-                                                        int __b) {
-  return __a[__b];
-}
-
-#ifdef __VSX__
-static __inline__ signed long long __ATTRS_o_ai
-vec_extract(vector signed long long __a, int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned long long __ATTRS_o_ai
-vec_extract(vector unsigned long long __a, int __b) {
-  return __a[__b];
-}
-
-static __inline__ unsigned long long __ATTRS_o_ai
-vec_extract(vector bool long long __a, int __b) {
-  return __a[__b];
-}
-
-static __inline__ double __ATTRS_o_ai vec_extract(vector double __a, int __b) {
-  return __a[__b];
-}
-#endif
-
-static __inline__ float __ATTRS_o_ai vec_extract(vector float __a, int __b) {
-  return __a[__b];
-}
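-
-/* Usage sketch (editorial): vec_extract returns the lane at the given index
-   as a scalar; the index is expected to be in range for the element type.
-   Names below are illustrative only.
-
-     vector int __v = {10, 20, 30, 40};
-     int __second = vec_extract(__v, 1);   // 20
-*/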
-
-#ifdef __POWER9_VECTOR__
-
-#define vec_insert4b __builtin_vsx_insertword
-#define vec_extract4b __builtin_vsx_extractuword
-
-/* vec_extract_exp */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_extract_exp(vector float __a) {
-  return __builtin_vsx_xvxexpsp(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_extract_exp(vector double __a) {
-  return __builtin_vsx_xvxexpdp(__a);
-}
-
-/* vec_extract_sig */
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_extract_sig(vector float __a) {
-  return __builtin_vsx_xvxsigsp(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_extract_sig(vector double __a) {
-  return __builtin_vsx_xvxsigdp(__a);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_extract_fp32_from_shorth(vector unsigned short __a) {
-  vector unsigned short __b =
-#ifdef __LITTLE_ENDIAN__
-            __builtin_shufflevector(__a, __a, 0, -1, 1, -1, 2, -1, 3, -1);
-#else
-            __builtin_shufflevector(__a, __a, -1, 0, -1, 1, -1, 2, -1, 3);
-#endif
-  return __builtin_vsx_xvcvhpsp(__b);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_extract_fp32_from_shortl(vector unsigned short __a) {
-  vector unsigned short __b =
-#ifdef __LITTLE_ENDIAN__
-            __builtin_shufflevector(__a, __a, 4, -1, 5, -1, 6, -1, 7, -1);
-#else
-            __builtin_shufflevector(__a, __a, -1, 4, -1, 5, -1, 6, -1, 7);
-#endif
-  return __builtin_vsx_xvcvhpsp(__b);
-}
-#endif /* __POWER9_VECTOR__ */
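-
-/* Usage sketch (editorial): together, vec_extract_exp and vec_extract_sig
-   pull apart the biased exponent and significand fields of each lane, so a
-   finite lane conceptually satisfies value == +/- significand * 2^(exp - bias).
-   Names below are illustrative only.
-
-     vector float __v = vec_splats(6.0f);
-     vector unsigned int __exp = vec_extract_exp(__v);  // biased exponents
-     vector unsigned int __sig = vec_extract_sig(__v);  // significands
-*/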
-
-/* vec_insert */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_insert(signed char __a, vector signed char __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_insert(unsigned char __a, vector unsigned char __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector bool char __ATTRS_o_ai vec_insert(unsigned char __a,
-                                                           vector bool char __b,
-                                                           int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_insert(signed short __a, vector signed short __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_insert(unsigned short __a, vector unsigned short __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_insert(unsigned short __a, vector bool short __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_insert(signed int __a, vector signed int __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_insert(unsigned int __a, vector unsigned int __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector bool int __ATTRS_o_ai vec_insert(unsigned int __a,
-                                                          vector bool int __b,
-                                                          int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_insert(signed long long __a, vector signed long long __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_insert(unsigned long long __a, vector unsigned long long __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_insert(unsigned long long __a, vector bool long long __b, int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-
-static __inline__ vector double __ATTRS_o_ai vec_insert(double __a,
-                                                        vector double __b,
-                                                        int __c) {
-  __b[__c] = __a;
-  return __b;
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_insert(float __a,
-                                                       vector float __b,
-                                                       int __c) {
-  __b[__c] = __a;
-  return __b;
-}
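-
-/* Usage sketch (editorial): vec_insert returns a copy of the vector with a
-   single lane replaced; the argument vector itself is not modified.  Names
-   below are illustrative only.
-
-     vector float __v = vec_splats(0.0f);
-     __v = vec_insert(3.25f, __v, 2);      // lane 2 becomes 3.25f
-     float __back = vec_extract(__v, 2);   // 3.25f
-*/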
-
-/* vec_lvlx */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlx(int __a, const signed char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlx(int __a, const vector signed char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlx(int __a, const unsigned char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlx(int __a, const vector unsigned char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvlx(int __a, const vector bool char *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector bool char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const vector short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlx(int __a, const unsigned short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlx(int __a, const vector unsigned short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvlx(int __a, const vector bool short *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector bool short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const vector pixel *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector pixel)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a, const int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlx(int __a,
-                                                   const vector int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlx(int __a, const unsigned int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlx(int __a, const vector unsigned int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvlx(int __a, const vector bool int *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector bool int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const float *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlx(int __a,
-                                                     const vector float *__b) {
-  return vec_perm(vec_ld(__a, __b), (vector float)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-/* vec_lvlxl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlxl(int __a, const signed char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvlxl(int __a, const vector signed char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector signed char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlxl(int __a, const unsigned char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvlxl(int __a, const vector unsigned char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvlxl(int __a, const vector bool char *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector bool char)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector short)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const vector short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlxl(int __a, const unsigned short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvlxl(int __a, const vector unsigned short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvlxl(int __a, const vector bool short *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector bool short)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const vector pixel *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector pixel)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a, const int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector int)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvlxl(int __a,
-                                                    const vector int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlxl(int __a, const unsigned int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvlxl(int __a, const vector unsigned int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector unsigned int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvlxl(int __a, const vector bool int *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector bool int)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const float *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector float)(0), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvlxl(int __a,
-                                                      const vector float *__b) {
-  return vec_perm(vec_ldl(__a, __b), (vector float)(0),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-/* vec_lvrx */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrx(int __a, const signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrx(int __a, const vector signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrx(int __a, const unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrx(int __a, const vector unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvrx(int __a, const vector bool char *__b) {
-  return vec_perm((vector bool char)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const short *__b) {
-  return vec_perm((vector short)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const vector short *__b) {
-  return vec_perm((vector short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrx(int __a, const unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrx(int __a, const vector unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvrx(int __a, const vector bool short *__b) {
-  return vec_perm((vector bool short)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const vector pixel *__b) {
-  return vec_perm((vector pixel)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a, const int *__b) {
-  return vec_perm((vector int)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrx(int __a,
-                                                   const vector int *__b) {
-  return vec_perm((vector int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrx(int __a, const unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrx(int __a, const vector unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvrx(int __a, const vector bool int *__b) {
-  return vec_perm((vector bool int)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const float *__b) {
-  return vec_perm((vector float)(0), vec_ld(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrx(int __a,
-                                                     const vector float *__b) {
-  return vec_perm((vector float)(0), vec_ld(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-/* vec_lvrxl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrxl(int __a, const signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_lvrxl(int __a, const vector signed char *__b) {
-  return vec_perm((vector signed char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrxl(int __a, const unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvrxl(int __a, const vector unsigned char *__b) {
-  return vec_perm((vector unsigned char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool char __ATTRS_o_ai
-vec_lvrxl(int __a, const vector bool char *__b) {
-  return vec_perm((vector bool char)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const short *__b) {
-  return vec_perm((vector short)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const vector short *__b) {
-  return vec_perm((vector short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrxl(int __a, const unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvrxl(int __a, const vector unsigned short *__b) {
-  return vec_perm((vector unsigned short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_lvrxl(int __a, const vector bool short *__b) {
-  return vec_perm((vector bool short)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector pixel __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const vector pixel *__b) {
-  return vec_perm((vector pixel)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a, const int *__b) {
-  return vec_perm((vector int)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_lvrxl(int __a,
-                                                    const vector int *__b) {
-  return vec_perm((vector int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrxl(int __a, const unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, __b));
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvrxl(int __a, const vector unsigned int *__b) {
-  return vec_perm((vector unsigned int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_lvrxl(int __a, const vector bool int *__b) {
-  return vec_perm((vector bool int)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const float *__b) {
-  return vec_perm((vector float)(0), vec_ldl(__a, __b), vec_lvsl(__a, __b));
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_lvrxl(int __a,
-                                                      const vector float *__b) {
-  return vec_perm((vector float)(0), vec_ldl(__a, __b),
-                  vec_lvsl(__a, (unsigned char *)__b));
-}
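-
-/* Usage sketch (editorial): the classic CBEA idiom for loading 16 bytes from
-   a possibly misaligned address combines the two halves, since vec_lvlx
-   yields the bytes from the address up to the next 16-byte boundary
-   (zero-filled on the right) and vec_lvrx yields the bytes before that
-   boundary (zero-filled on the left).  Names below are illustrative only.
-
-     const unsigned char *__p = ...;   // possibly unaligned
-     vector unsigned char __lo = vec_lvlx(0, __p);
-     vector unsigned char __hi = vec_lvrx(16, __p);
-     vector unsigned char __val = vec_or(__lo, __hi);
-*/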
-
-/* vec_stvlx */
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
-                                              signed char *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector signed char __a, int __b,
-                                              vector signed char *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
-                                              unsigned char *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned char __a, int __b,
-                                              vector unsigned char *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool char __a, int __b,
-                                              vector bool char *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b,
-                                              short *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector short __a, int __b,
-                                              vector short *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a,
-                                              int __b, unsigned short *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned short __a,
-                                              int __b,
-                                              vector unsigned short *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool short __a, int __b,
-                                              vector bool short *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector pixel __a, int __b,
-                                              vector pixel *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b,
-                                              int *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector int __a, int __b,
-                                              vector int *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
-                                              unsigned int *__c) {
-  return vec_st(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector unsigned int __a, int __b,
-                                              vector unsigned int *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector bool int __a, int __b,
-                                              vector bool int *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlx(vector float __a, int __b,
-                                              vector float *__c) {
-  return vec_st(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-/* vec_stvlxl */
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
-                                               signed char *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector signed char __a, int __b,
-                                               vector signed char *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a,
-                                               int __b, unsigned char *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned char __a,
-                                               int __b,
-                                               vector unsigned char *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool char __a, int __b,
-                                               vector bool char *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b,
-                                               short *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector short __a, int __b,
-                                               vector short *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a,
-                                               int __b, unsigned short *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned short __a,
-                                               int __b,
-                                               vector unsigned short *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool short __a, int __b,
-                                               vector bool short *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector pixel __a, int __b,
-                                               vector pixel *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b,
-                                               int *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector int __a, int __b,
-                                               vector int *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
-                                               unsigned int *__c) {
-  return vec_stl(vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector unsigned int __a, int __b,
-                                               vector unsigned int *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector bool int __a, int __b,
-                                               vector bool int *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvlxl(vector float __a, int __b,
-                                               vector float *__c) {
-  return vec_stl(
-      vec_perm(vec_lvrx(__b, __c), __a, vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-/* vec_stvrx */
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
-                                              signed char *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector signed char __a, int __b,
-                                              vector signed char *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
-                                              unsigned char *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned char __a, int __b,
-                                              vector unsigned char *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool char __a, int __b,
-                                              vector bool char *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b,
-                                              short *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector short __a, int __b,
-                                              vector short *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a,
-                                              int __b, unsigned short *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned short __a,
-                                              int __b,
-                                              vector unsigned short *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool short __a, int __b,
-                                              vector bool short *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector pixel __a, int __b,
-                                              vector pixel *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b,
-                                              int *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector int __a, int __b,
-                                              vector int *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
-                                              unsigned int *__c) {
-  return vec_st(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector unsigned int __a, int __b,
-                                              vector unsigned int *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector bool int __a, int __b,
-                                              vector bool int *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrx(vector float __a, int __b,
-                                              vector float *__c) {
-  return vec_st(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-/* vec_stvrxl */
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
-                                               signed char *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector signed char __a, int __b,
-                                               vector signed char *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a,
-                                               int __b, unsigned char *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned char __a,
-                                               int __b,
-                                               vector unsigned char *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool char __a, int __b,
-                                               vector bool char *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b,
-                                               short *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector short __a, int __b,
-                                               vector short *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a,
-                                               int __b, unsigned short *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned short __a,
-                                               int __b,
-                                               vector unsigned short *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool short __a, int __b,
-                                               vector bool short *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector pixel __a, int __b,
-                                               vector pixel *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b,
-                                               int *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector int __a, int __b,
-                                               vector int *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
-                                               unsigned int *__c) {
-  return vec_stl(vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, __c)), __b,
-                 __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector unsigned int __a, int __b,
-                                               vector unsigned int *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector bool int __a, int __b,
-                                               vector bool int *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
-
-static __inline__ void __ATTRS_o_ai vec_stvrxl(vector float __a, int __b,
-                                               vector float *__c) {
-  return vec_stl(
-      vec_perm(__a, vec_lvlx(__b, __c), vec_lvsr(__b, (unsigned char *)__c)),
-      __b, __c);
-}
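-
-/* Usage sketch (editorial): the complementary store idiom writes a vector to
-   a possibly misaligned address by storing the left part into the 16-byte
-   block containing the address and the right part into the following block;
-   the embedded vec_lvrx / vec_lvlx loads merge back the bytes outside the
-   target range.  Names below are illustrative only.
-
-     unsigned char *__p = ...;   // possibly unaligned
-     vector unsigned char __v = vec_splats((unsigned char)0xFF);
-     vec_stvlx(__v, 0, __p);
-     vec_stvrx(__v, 16, __p);
-*/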
-
-/* vec_promote */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_promote(signed char __a,
-                                                              int __b) {
-  vector signed char __res = (vector signed char)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_promote(unsigned char __a, int __b) {
-  vector unsigned char __res = (vector unsigned char)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_promote(short __a, int __b) {
-  vector short __res = (vector short)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_promote(unsigned short __a, int __b) {
-  vector unsigned short __res = (vector unsigned short)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_promote(int __a, int __b) {
-  vector int __res = (vector int)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_promote(unsigned int __a,
-                                                               int __b) {
-  vector unsigned int __res = (vector unsigned int)(0);
-  __res[__b] = __a;
-  return __res;
-}
-
-static __inline__ vector float __ATTRS_o_ai vec_promote(float __a, int __b) {
-  vector float __res = (vector float)(0);
-  __res[__b] = __a;
-  return __res;
-}
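-
-/* Usage sketch (editorial): vec_promote builds a vector whose only defined
-   lane is the one at the given index; this implementation zeroes the other
-   lanes, but portable code should not rely on their contents.
-
-     vector int __v = vec_promote(42, 0);   // lane 0 == 42
-*/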
-
-/* vec_splats */
-
-static __inline__ vector signed char __ATTRS_o_ai vec_splats(signed char __a) {
-  return (vector signed char)(__a);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_splats(unsigned char __a) {
-  return (vector unsigned char)(__a);
-}
-
-static __inline__ vector short __ATTRS_o_ai vec_splats(short __a) {
-  return (vector short)(__a);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_splats(unsigned short __a) {
-  return (vector unsigned short)(__a);
-}
-
-static __inline__ vector int __ATTRS_o_ai vec_splats(int __a) {
-  return (vector int)(__a);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_splats(unsigned int __a) {
-  return (vector unsigned int)(__a);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_splats(signed long long __a) {
-  return (vector signed long long)(__a);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_splats(unsigned long long __a) {
-  return (vector unsigned long long)(__a);
-}
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_splats(signed __int128 __a) {
-  return (vector signed __int128)(__a);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_splats(unsigned __int128 __a) {
-  return (vector unsigned __int128)(__a);
-}
-
-#endif
-
-static __inline__ vector double __ATTRS_o_ai vec_splats(double __a) {
-  return (vector double)(__a);
-}
-#endif
-
-static __inline__ vector float __ATTRS_o_ai vec_splats(float __a) {
-  return (vector float)(__a);
-}
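-
-/* Usage sketch (editorial): vec_splats replicates a scalar into every lane,
-   which is the usual way to materialize vector constants for the operations
-   above.  Names below are illustrative only.
-
-     vector float __ones = vec_splats(1.0f);
-     vector unsigned int __mask = vec_splats(0xFFu);
-*/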
-
-/* ----------------------------- predicates --------------------------------- */
-
-/* vec_all_eq */
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
-                                              vector long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT, (vector long long)__a,
-                                      (vector long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_LT, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_eq(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, __a, __b);
-}
-#endif
-
-/* vec_all_ge */
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __b, (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __b, (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __b, __a);
-}
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __a, __b);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_ge(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __a, __b);
-}
-#endif
-
-/* vec_all_gt */
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __a, (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __a, (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __a, (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a, __b);
-}
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __a, __b);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_gt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __a, __b);
-}
-#endif
-
-/* vec_all_in */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_all_in(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp_p(__CR6_EQ, __a, __b);
-}
-
-/* vec_all_le */
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __a, (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, __a, (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, __a, (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_le(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_LT, __b, __a);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_le(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_LT, __b, __a);
-}
-#endif
-
-/* vec_all_lt */
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, __b, (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, __b, (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_LT, __b, __a);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_lt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_LT, __b, __a);
-}
-#endif
-
-/* vec_all_nan */
-
-static __inline__ int __ATTRS_o_ai vec_all_nan(vector float __a) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __a);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_nan(vector double __a) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __a);
-}
-#endif
-
-/* vec_all_ne */
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ, (vector signed long long)__a,
-                                      (vector signed long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_ne(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ, __a, __b);
-}
-#endif
-
-/* vec_all_nge */
-
-static __inline__ int __ATTRS_o_ai vec_all_nge(vector float __a,
-                                               vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_EQ, __a, __b);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_nge(vector double __a,
-                                               vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_EQ, __a, __b);
-}
-#endif
-
-/* vec_all_ngt */
-
-static __inline__ int __ATTRS_o_ai vec_all_ngt(vector float __a,
-                                               vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ, __a, __b);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_all_ngt(vector double __a,
-                                               vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ, __a, __b);
-}
-#endif
-
-/* vec_all_nle */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_all_nle(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __b, __a);
-}
-
-/* vec_all_nlt */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_all_nlt(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __b, __a);
-}
-
-/* vec_all_numeric */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_all_numeric(vector float __a) {
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __a);
-}
-
-/* vec_any_eq */
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_EQ_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_eq(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ_REV, __a, __b);
-}
-#endif
-
-/* vec_any_ge */
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV,
-                                      (vector signed long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_ge(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __a, __b);
-}
-#endif
-
-/* vec_any_gt */
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __a,
-                                      (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_gt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __a, __b);
-}
-#endif
-
-/* vec_any_le */
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __a,
-                                      (vector signed char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, __a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
-                                      (vector unsigned char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, __a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
-                                      (vector unsigned short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, __a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
-                                      __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
-                                      (vector unsigned int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV, __a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
-                                      (vector unsigned long long)__a,
-                                      (vector unsigned long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_le(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgesp_p(__CR6_EQ_REV, __b, __a);
-#else
-  return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_le(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgedp_p(__CR6_EQ_REV, __b, __a);
-}
-#endif
-
-/* vec_any_lt */
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
-                                      (vector unsigned char)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector short)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
-                                      (vector unsigned short)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector int)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
-                                      __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned int)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
-                                      (vector unsigned int)__a);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV,
-                                      (vector signed long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__b, __a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV, __b,
-                                      (vector unsigned long long)__a);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
-                                      (vector unsigned long long)__b,
-                                      (vector unsigned long long)__a);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ_REV, __b, __a);
-#else
-  return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, __b, __a);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_lt(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ_REV, __b, __a);
-}
-#endif
-
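-/* Illustrative only, not part of the original header: each vec_any_*
-   relational predicate above returns a nonzero int when at least one
-   element pair satisfies the relation, by reading the CR6 field reported
-   by the underlying vcmp*_p builtin. A minimal sketch, assuming an
-   AltiVec target; the __example_* helper is hypothetical: */
-static __inline__ int __example_has_negative(vector signed int __v) {
-  /* Nonzero iff any lane of __v is strictly less than zero. */
-  return vec_any_lt(__v, vec_splats(0));
-}
-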
-/* vec_any_nan */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nan(vector float __a) {
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __a);
-}
-
-/* vec_any_ne */
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
-                                              vector signed char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
-                                              vector unsigned char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool char __a,
-                                              vector bool char __b) {
-  return __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)__a,
-                                      (vector char)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, __a, (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
-                                              vector short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
-                                              vector unsigned short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool short __a,
-                                              vector bool short __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector pixel __a,
-                                              vector pixel __b) {
-  return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, (vector short)__a,
-                                      (vector short)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a, vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, __a, (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
-                                              vector int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
-                                              vector unsigned int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
-                                              vector bool int __b) {
-  return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)__a,
-                                      (vector int)__b);
-}
-
-#ifdef __POWER8_VECTOR__
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, __b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, (vector long long)__a,
-                                      (vector long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a,
-                                      (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
-                                              vector signed long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
-                                              vector unsigned long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool long long __a,
-                                              vector bool long long __b) {
-  return __builtin_altivec_vcmpequd_p(
-      __CR6_LT_REV, (vector signed long long)__a, (vector signed long long)__b);
-}
-#endif
-
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector float __a,
-                                              vector float __b) {
-#ifdef __VSX__
-  return __builtin_vsx_xvcmpeqsp_p(__CR6_LT_REV, __a, __b);
-#else
-  return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __b);
-#endif
-}
-
-#ifdef __VSX__
-static __inline__ int __ATTRS_o_ai vec_any_ne(vector double __a,
-                                              vector double __b) {
-  return __builtin_vsx_xvcmpeqdp_p(__CR6_LT_REV, __a, __b);
-}
-#endif
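-
-/* Illustrative only, not part of the original header: there is no direct
-   "any unequal" compare instruction, so the vec_any_ne overloads above ask
-   the equality builtin for the CR6 "not all true" bit (__CR6_LT_REV).
-   A hypothetical helper as a minimal sketch: */
-static __inline__ int __example_vectors_differ(vector unsigned char __x,
-                                               vector unsigned char __y) {
-  /* Nonzero iff __x and __y differ in at least one byte. */
-  return vec_any_ne(__x, __y);
-}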
-
-/* vec_any_nge */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nge(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __a, __b);
-}
-
-/* vec_any_ngt */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_ngt(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __a, __b);
-}
-
-/* vec_any_nle */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nle(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __b, __a);
-}
-
-/* vec_any_nlt */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nlt(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __b, __a);
-}
-
-/* vec_any_numeric */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_numeric(vector float __a) {
-  return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __a);
-}
-
-/* vec_any_out */
-
-static __inline__ int __attribute__((__always_inline__))
-vec_any_out(vector float __a, vector float __b) {
-  return __builtin_altivec_vcmpbfp_p(__CR6_EQ_REV, __a, __b);
-}
-
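-/* Illustrative only, not part of the original header: vec_any_nan and
-   vec_any_numeric rely on a NaN comparing unequal to itself, so comparing
-   __a against __a classifies every lane at once. A minimal sketch using a
-   hypothetical __example_* helper: */
-static __inline__ int __example_all_lanes_numeric(vector float __x,
-                                                  vector float __y) {
-  /* Nonzero iff neither input carries a NaN in any lane. */
-  return !vec_any_nan(__x) && !vec_any_nan(__y);
-}
-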
-/* Power 8 Crypto functions
-Note: We diverge from the current GCC implementation with regard to
-cryptography and related functions as follows:
-- Only the SHA and AES instructions and builtins are disabled by -mno-crypto
-- The remaining ones are only available on Power8 and up, so they
-  require -mpower8-vector
-The justification is that export regulations require Category:Vector.Crypto
-to be optional (i.e. compliant hardware may not provide support), so we
-need to be able to turn off support for those instructions. The remaining
-builtins (currently controlled by -mcrypto for GCC) must still be provided
-on compliant hardware even when Vector.Crypto is not.
-*/
-#ifdef __CRYPTO__
-#define vec_sbox_be __builtin_altivec_crypto_vsbox
-#define vec_cipher_be __builtin_altivec_crypto_vcipher
-#define vec_cipherlast_be __builtin_altivec_crypto_vcipherlast
-#define vec_ncipher_be __builtin_altivec_crypto_vncipher
-#define vec_ncipherlast_be __builtin_altivec_crypto_vncipherlast
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vsbox(vector unsigned long long __a) {
-  return __builtin_altivec_crypto_vsbox(__a);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vcipher(vector unsigned long long __a,
-                         vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vcipher(__a, __b);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vcipherlast(vector unsigned long long __a,
-                             vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vcipherlast(__a, __b);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vncipher(vector unsigned long long __a,
-                          vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vncipher(__a, __b);
-}
-
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-__builtin_crypto_vncipherlast(vector unsigned long long __a,
-                              vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vncipherlast(__a, __b);
-}
-
-#define __builtin_crypto_vshasigmad __builtin_altivec_crypto_vshasigmad
-#define __builtin_crypto_vshasigmaw __builtin_altivec_crypto_vshasigmaw
-
-#define vec_shasigma_be(X, Y, Z)                                               \
-  _Generic((X), vector unsigned int                                            \
-           : __builtin_crypto_vshasigmaw, vector unsigned long long            \
-           : __builtin_crypto_vshasigmad)((X), (Y), (Z))
-#endif
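-
-/* Illustrative only, not part of the original header: since
-   Category:Vector.Crypto is optional, portable callers guard the AES/SHA
-   wrappers behind __CRYPTO__ instead of assuming them. A minimal sketch,
-   assuming a Power8 target built with crypto enabled; the helper is
-   hypothetical: */
-#ifdef __CRYPTO__
-static __inline__ vector unsigned long long
-__example_aes_round(vector unsigned long long __state,
-                    vector unsigned long long __round_key) {
-  /* One forward AES round: SubBytes, ShiftRows, MixColumns, then the
-     XOR with the round key, all done by the single vcipher instruction. */
-  return vec_cipher_be(__state, __round_key);
-}
-#endif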
-
-#ifdef __POWER8_VECTOR__
-static __inline__ vector bool char __ATTRS_o_ai
-vec_permxor(vector bool char __a, vector bool char __b,
-            vector bool char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_permxor(vector signed char __a, vector signed char __b,
-            vector signed char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_permxor(vector unsigned char __a, vector unsigned char __b,
-            vector unsigned char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-__builtin_crypto_vpermxor(vector unsigned char __a, vector unsigned char __b,
-                          vector unsigned char __c) {
-  return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-__builtin_crypto_vpermxor(vector unsigned short __a, vector unsigned short __b,
-                          vector unsigned short __c) {
-  return (vector unsigned short)__builtin_altivec_crypto_vpermxor(
-      (vector unsigned char)__a, (vector unsigned char)__b,
-      (vector unsigned char)__c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai __builtin_crypto_vpermxor(
-    vector unsigned int __a, vector unsigned int __b, vector unsigned int __c) {
-  return (vector unsigned int)__builtin_altivec_crypto_vpermxor(
-      (vector unsigned char)__a, (vector unsigned char)__b,
-      (vector unsigned char)__c);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-__builtin_crypto_vpermxor(vector unsigned long long __a,
-                          vector unsigned long long __b,
-                          vector unsigned long long __c) {
-  return (vector unsigned long long)__builtin_altivec_crypto_vpermxor(
-      (vector unsigned char)__a, (vector unsigned char)__b,
-      (vector unsigned char)__c);
-}
-
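-/* Illustrative only, not part of the original header: vpermxor picks one
-   byte from each of two 16-byte tables using the high and low nibbles of
-   the selector and XORs the two picks, the primitive behind RAID-6 style
-   GF(2^8) multiplies. A minimal sketch with a hypothetical helper: */
-static __inline__ vector unsigned char
-__example_permxor(vector unsigned char __hi_table,
-                  vector unsigned char __lo_table,
-                  vector unsigned char __selectors) {
-  /* Per byte: table lookup by nibble pair, then XOR of the two results. */
-  return __builtin_crypto_vpermxor(__hi_table, __lo_table, __selectors);
-}
-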
-static __inline__ vector unsigned char __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_crypto_vpmsumb(__a, __b);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned short __a, vector unsigned short __b) {
-  return __builtin_altivec_crypto_vpmsumh(__a, __b);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned int __a, vector unsigned int __b) {
-  return __builtin_altivec_crypto_vpmsumw(__a, __b);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-__builtin_crypto_vpmsumb(vector unsigned long long __a,
-                         vector unsigned long long __b) {
-  return __builtin_altivec_crypto_vpmsumd(__a, __b);
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_vgbbd(vector signed char __a) {
-  return __builtin_altivec_vgbbd((vector unsigned char)__a);
-}
-
-#define vec_pmsum_be __builtin_crypto_vpmsumb
-#define vec_gb __builtin_altivec_vgbbd
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_vgbbd(vector unsigned char __a) {
-  return __builtin_altivec_vgbbd(__a);
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_vbpermq(vector signed char __a, vector signed char __b) {
-  return __builtin_altivec_vbpermq((vector unsigned char)__a,
-                                   (vector unsigned char)__b);
-}
-
-static __inline__ vector long long __ATTRS_o_ai
-vec_vbpermq(vector unsigned char __a, vector unsigned char __b) {
-  return __builtin_altivec_vbpermq(__a, __b);
-}
-
-#ifdef __powerpc64__
-static __inline__ vector unsigned long long __attribute__((__always_inline__))
-vec_bperm(vector unsigned __int128 __a, vector unsigned char __b) {
-  return __builtin_altivec_vbpermq((vector unsigned char)__a,
-                                   (vector unsigned char)__b);
-}
-#endif
-#endif
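-
-/* Illustrative only, not part of the original header: vec_pmsum_be does a
-   carry-less (polynomial) multiply of each pair of corresponding lanes and
-   XORs the neighbouring products together, the building block of CRC and
-   GHASH kernels. A minimal sketch with a hypothetical helper: */
-#ifdef __POWER8_VECTOR__
-static __inline__ vector unsigned long long
-__example_clmul_fold(vector unsigned long long __a,
-                     vector unsigned long long __b) {
-  /* XOR of the two 128-bit carry-less products of the doubleword lanes. */
-  return vec_pmsum_be(__a, __b);
-}
-#endif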
-
-/* vec_reve */
-
-static inline __ATTRS_o_ai vector bool char vec_reve(vector bool char __a) {
-  return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
-                                 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed char vec_reve(vector signed char __a) {
-  return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
-                                 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_reve(vector unsigned char __a) {
-  return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
-                                 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector bool int vec_reve(vector bool int __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed int vec_reve(vector signed int __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned int
-vec_reve(vector unsigned int __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector bool short vec_reve(vector bool short __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed short
-vec_reve(vector signed short __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_reve(vector unsigned short __a) {
-  return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector float vec_reve(vector float __a) {
-  return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
-}
-
-#ifdef __VSX__
-static inline __ATTRS_o_ai vector bool long long
-vec_reve(vector bool long long __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector signed long long
-vec_reve(vector signed long long __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_reve(vector unsigned long long __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-
-static inline __ATTRS_o_ai vector double vec_reve(vector double __a) {
-  return __builtin_shufflevector(__a, __a, 1, 0);
-}
-#endif
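-
-/* Illustrative only, not part of the original header: vec_reve reverses
-   the order of the elements, not the bytes inside them, via a constant
-   __builtin_shufflevector mask. A minimal sketch with a hypothetical
-   helper: */
-static __inline__ vector signed int
-__example_reverse_lanes(vector signed int __v) {
-  /* {a, b, c, d} becomes {d, c, b, a}. */
-  return vec_reve(__v);
-}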
-
-/* vec_revb */
-static __inline__ vector bool char __ATTRS_o_ai
-vec_revb(vector bool char __a) {
-  return __a;
-}
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_revb(vector signed char __a) {
-  return __a;
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_revb(vector unsigned char __a) {
-  return __a;
-}
-
-static __inline__ vector bool short __ATTRS_o_ai
-vec_revb(vector bool short __a) {
-  vector unsigned char __indices =
-      { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_revb(vector signed short __a) {
-  vector unsigned char __indices =
-      { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_revb(vector unsigned short __a) {
-  vector unsigned char __indices =
-      { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector bool int __ATTRS_o_ai
-vec_revb(vector bool int __a) {
-  vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_revb(vector signed int __a) {
-  vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_revb(vector unsigned int __a) {
-  vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_revb(vector float __a) {
-  vector unsigned char __indices =
-      { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
-  return vec_perm(__a, __a, __indices);
-}
-
-#ifdef __VSX__
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_revb(vector bool long long __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_revb(vector signed long long __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_revb(vector unsigned long long __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_revb(vector double __a) {
-  vector unsigned char __indices =
-      { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
-  return vec_perm(__a, __a, __indices);
-}
-#endif /* End __VSX__ */
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_revb(vector signed __int128 __a) {
-  vector unsigned char __indices =
-      { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
-  return (vector signed __int128)vec_perm((vector signed int)__a,
-                                          (vector signed int)__a,
-                                          __indices);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_revb(vector unsigned __int128 __a) {
-  vector unsigned char __indices =
-      { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
-  return (vector unsigned __int128)vec_perm((vector signed int)__a,
-                                            (vector signed int)__a,
-                                            __indices);
-}
-#endif /* END __POWER8_VECTOR__ && __powerpc64__ */
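-
-/* Illustrative only, not part of the original header: vec_revb is the
-   complement of vec_reve; it swaps the bytes inside each element while
-   keeping the element order, so it is the identity on byte-sized lanes.
-   A minimal sketch with a hypothetical helper: */
-static __inline__ vector unsigned int
-__example_byteswap_lanes(vector unsigned int __v) {
-  /* Each 32-bit lane 0xAABBCCDD becomes 0xDDCCBBAA. */
-  return vec_revb(__v);
-}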
-
-/* vec_xl */
-
-typedef vector signed char unaligned_vec_schar __attribute__((aligned(1)));
-typedef vector unsigned char unaligned_vec_uchar __attribute__((aligned(1)));
-typedef vector signed short unaligned_vec_sshort __attribute__((aligned(1)));
-typedef vector unsigned short unaligned_vec_ushort __attribute__((aligned(1)));
-typedef vector signed int unaligned_vec_sint __attribute__((aligned(1)));
-typedef vector unsigned int unaligned_vec_uint __attribute__((aligned(1)));
-typedef vector float unaligned_vec_float __attribute__((aligned(1)));
-
-static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset,
-                                                     signed char *__ptr) {
-  return *(unaligned_vec_schar *)(__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector unsigned char
-vec_xl(signed long long __offset, unsigned char *__ptr) {
-  return *(unaligned_vec_uchar*)(__ptr + __offset);
-}
-
-static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
-                                                      signed short *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_sshort *)__addr;
-}
-
-static inline __ATTRS_o_ai vector unsigned short
-vec_xl(signed long long __offset, unsigned short *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_ushort *)__addr;
-}
-
-static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
-                                                    signed int *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_sint *)__addr;
-}
-
-static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
-                                                      unsigned int *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_uint *)__addr;
-}
-
-static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
-                                               float *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_float *)__addr;
-}
-
-#ifdef __VSX__
-typedef vector signed long long unaligned_vec_sll __attribute__((aligned(1)));
-typedef vector unsigned long long unaligned_vec_ull __attribute__((aligned(1)));
-typedef vector double unaligned_vec_double __attribute__((aligned(1)));
-
-static inline __ATTRS_o_ai vector signed long long
-vec_xl(signed long long __offset, signed long long *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_sll *)__addr;
-}
-
-static inline __ATTRS_o_ai vector unsigned long long
-vec_xl(signed long long __offset, unsigned long long *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_ull *)__addr;
-}
-
-static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
-                                                double *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_double *)__addr;
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-typedef vector signed __int128 unaligned_vec_si128 __attribute__((aligned(1)));
-typedef vector unsigned __int128 unaligned_vec_ui128
-    __attribute__((aligned(1)));
-static inline __ATTRS_o_ai vector signed __int128
-vec_xl(signed long long __offset, signed __int128 *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_si128 *)__addr;
-}
-
-static inline __ATTRS_o_ai vector unsigned __int128
-vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  return *(unaligned_vec_ui128 *)__addr;
-}
-#endif
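-
-/* Illustrative only, not part of the original header: vec_xl loads a
-   vector through one of the aligned(1) typedefs above, so the pointer
-   needs no particular alignment and the byte __offset is simply added to
-   it. A minimal sketch with a hypothetical helper: */
-static __inline__ vector float __example_load_unaligned(float *__p) {
-  /* Loads four floats starting one element past __p; note that the
-     offset argument is in bytes, not elements. */
-  return vec_xl(sizeof(float), __p);
-}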
-
-/* vec_xl_be */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ vector signed char __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed char *__ptr) {
-  vector signed char __vec =
-      (vector signed char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                                 13, 12, 11, 10, 9, 8);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned char *__ptr) {
-  vector unsigned char __vec =
-      (vector unsigned char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                                 13, 12, 11, 10, 9, 8);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed short *__ptr) {
-  vector signed short __vec =
-      (vector signed short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned short *__ptr) {
-  vector unsigned short __vec =
-      (vector unsigned short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-  return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed int *__ptr) {
-  return (vector signed int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned int *__ptr) {
-  return (vector unsigned int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_xl_be(signed long long __offset, float *__ptr) {
-  return (vector float)__builtin_vsx_lxvw4x_be(__offset, __ptr);
-}
-
-#ifdef __VSX__
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed long long *__ptr) {
-  return (vector signed long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned long long *__ptr) {
-  return (vector unsigned long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_xl_be(signed long long __offset, double *__ptr) {
-  return (vector double)__builtin_vsx_lxvd2x_be(__offset, __ptr);
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed __int128 *__ptr) {
-  return vec_xl(__offset, __ptr);
-}
-
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned __int128 *__ptr) {
-  return vec_xl(__offset, __ptr);
-}
-#endif
-#else
-  #define vec_xl_be vec_xl
-#endif
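-
-/* Illustrative only, not part of the original header: vec_xl_be yields
-   big-endian element ordering on either endianness; little-endian targets
-   go through the lxvd2x/lxvw4x "be" builtins plus a shuffle, while
-   big-endian targets fall back to plain vec_xl. A minimal sketch with a
-   hypothetical helper: */
-static __inline__ vector signed int __example_load_be_words(signed int *__p) {
-  /* The four words appear in the same element order on LE and BE. */
-  return vec_xl_be(0, __p);
-}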
-
-/* vec_xst */
-
-static inline __ATTRS_o_ai void vec_xst(vector signed char __vec,
-                                        signed long long __offset,
-                                        signed char *__ptr) {
-  *(unaligned_vec_schar *)(__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned char __vec,
-                                        signed long long __offset,
-                                        unsigned char *__ptr) {
-  *(unaligned_vec_uchar *)(__ptr + __offset) = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector signed short __vec,
-                                        signed long long __offset,
-                                        signed short *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_sshort *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
-                                        signed long long __offset,
-                                        unsigned short *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_ushort *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
-                                        signed long long __offset,
-                                        signed int *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_sint *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned int __vec,
-                                        signed long long __offset,
-                                        unsigned int *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_uint *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector float __vec,
-                                        signed long long __offset,
-                                        float *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_float *)__addr = __vec;
-}
-
-#ifdef __VSX__
-static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
-                                        signed long long __offset,
-                                        signed long long *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_sll *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
-                                        signed long long __offset,
-                                        unsigned long long *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_ull *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector double __vec,
-                                        signed long long __offset,
-                                        double *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_double *)__addr = __vec;
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
-                                        signed long long __offset,
-                                        signed __int128 *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_si128 *)__addr = __vec;
-}
-
-static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
-                                        signed long long __offset,
-                                        unsigned __int128 *__ptr) {
-  signed char *__addr = (signed char *)__ptr + __offset;
-  *(unaligned_vec_ui128 *)__addr = __vec;
-}
-#endif
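-
-/* A minimal usage sketch for the vec_xst overloads above (the element
-   type, offset and buffer are illustrative): the store is made to the
-   possibly unaligned byte address (signed char *)__ptr + __offset.
-
-     vector signed int __v = {1, 2, 3, 4};
-     signed int __buf[8];
-     vec_xst(__v, 4, __buf);  // writes 16 bytes at (char *)__buf + 4
-*/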
-
-/* vec_xst_be */
-
-#ifdef __LITTLE_ENDIAN__
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed char __vec,
-                                               signed long long  __offset,
-                                               signed char *__ptr) {
-  vector signed char __tmp =
-     __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                             13, 12, 11, 10, 9, 8);
-  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
-  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned char __vec,
-                                               signed long long  __offset,
-                                               unsigned char *__ptr) {
-  vector unsigned char __tmp =
-     __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
-                             13, 12, 11, 10, 9, 8);
-  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
-  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed short __vec,
-                                               signed long long  __offset,
-                                               signed short *__ptr) {
-  vector signed short __tmp =
-     __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
-  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned short __vec,
-                                               signed long long  __offset,
-                                               unsigned short *__ptr) {
-  vector unsigned short __tmp =
-     __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
-  typedef __attribute__((vector_size(sizeof(__tmp)))) double __vector_double;
-  __builtin_vsx_stxvd2x_be((__vector_double)__tmp, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed int __vec,
-                                               signed long long  __offset,
-                                               signed int *__ptr) {
-  __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned int __vec,
-                                               signed long long  __offset,
-                                               unsigned int *__ptr) {
-  __builtin_vsx_stxvw4x_be((vector int)__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector float __vec,
-                                               signed long long  __offset,
-                                               float *__ptr) {
-  __builtin_vsx_stxvw4x_be((vector int)__vec, __offset, __ptr);
-}
-
-#ifdef __VSX__
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed long long __vec,
-                                               signed long long  __offset,
-                                               signed long long *__ptr) {
-  __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned long long __vec,
-                                               signed long long  __offset,
-                                               unsigned long long *__ptr) {
-  __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector double __vec,
-                                               signed long long  __offset,
-                                               double *__ptr) {
-  __builtin_vsx_stxvd2x_be((vector double)__vec, __offset, __ptr);
-}
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed __int128 __vec,
-                                               signed long long  __offset,
-                                               signed __int128 *__ptr) {
-  vec_xst(__vec, __offset, __ptr);
-}
-
-static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned __int128 __vec,
-                                               signed long long  __offset,
-                                               unsigned __int128 *__ptr) {
-  vec_xst(__vec, __offset, __ptr);
-}
-#endif
-#else
-  #define vec_xst_be vec_xst
-#endif
-
-#ifdef __POWER9_VECTOR__
-#define vec_test_data_class(__a, __b)                                          \
-  _Generic(                                                                    \
-      (__a), vector float                                                      \
-      : (vector bool int)__builtin_vsx_xvtstdcsp((vector float)(__a), (__b)),  \
-        vector double                                                          \
-      : (vector bool long long)__builtin_vsx_xvtstdcdp((vector double)(__a),   \
-                                                       (__b)))
-
-#endif /* #ifdef __POWER9_VECTOR__ */
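-
-/* Usage sketch for vec_test_data_class (the mask is illustrative): __b is
-   a 7-bit class mask selecting which IEEE data classes (NaN, +/-infinity,
-   +/-zero, +/-denormal) to test, and each result element is all ones when
-   its value belongs to a selected class.
-
-     vector bool int __is_special = vec_test_data_class(__f, __mask);
-*/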
-
-static vector float __ATTRS_o_ai vec_neg(vector float __a) {
-  return -__a;
-}
-
-#ifdef __VSX__
-static vector double __ATTRS_o_ai vec_neg(vector double __a) {
-  return -__a;
-}
-
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector long long __ATTRS_o_ai vec_neg(vector long long __a) {
-  return -__a;
-}
-#endif
-
-static vector signed int __ATTRS_o_ai vec_neg(vector signed int __a) {
-  return -__a;
-}
-
-static vector signed short __ATTRS_o_ai vec_neg(vector signed short __a) {
-  return -__a;
-}
-
-static vector signed char __ATTRS_o_ai vec_neg(vector signed char __a) {
-  return -__a;
-}
-
-static vector float __ATTRS_o_ai vec_nabs(vector float __a) {
-  return -vec_abs(__a);
-}
-
-#ifdef __VSX__
-static vector double __ATTRS_o_ai vec_nabs(vector double __a) {
-  return -vec_abs(__a);
-}
-
-#endif
-
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
-static vector long long __ATTRS_o_ai vec_nabs(vector long long __a) {
-  return __builtin_altivec_vminsd(__a, -__a);
-}
-#endif
-
-static vector signed int __ATTRS_o_ai vec_nabs(vector signed int __a) {
-  return __builtin_altivec_vminsw(__a, -__a);
-}
-
-static vector signed short __ATTRS_o_ai vec_nabs(vector signed short __a) {
-  return __builtin_altivec_vminsh(__a, -__a);
-}
-
-static vector signed char __ATTRS_o_ai vec_nabs(vector signed char __a) {
-  return __builtin_altivec_vminsb(__a, -__a);
-}
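-
-/* Note on the integer vec_nabs overloads above: min(__a, -__a) computes
-   -|__a| elementwise, and the most negative element value maps to itself
-   under two's-complement wraparound, so no extra clamping is needed. */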
-
-#ifdef __POWER10_VECTOR__
-/* vec_pdep */
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_pdep(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vpdepd(__a, __b);
-}
-
-/* vec_pext */
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_pext(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vpextd(__a, __b);
-}
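-
-/* vec_pdep and vec_pext scatter/gather bits within each doubleword under
-   the mask in __b, analogous to the x86 PDEP/PEXT operations.  A sketch
-   with illustrative values:
-
-     vector unsigned long long __src  = {0x3ULL, 0};
-     vector unsigned long long __mask = {0x101ULL, 0};
-     // vec_pdep deposits the two low source bits at the mask's set bit
-     // positions, so element 0 of the result is 0x101ULL.
-     vector unsigned long long __r = vec_pdep(__src, __mask);
-*/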
-
-/* vec_cfuge */
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_cfuge(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vcfuged(__a, __b);
-}
-
-/* vec_gnb */
-
-#define vec_gnb(__a, __b) __builtin_altivec_vgnb(__a, __b)
-
-/* vec_ternarylogic */
-#ifdef __VSX__
-#define vec_ternarylogic(__a, __b, __c, __imm)                                 \
-  _Generic((__a), vector unsigned char                                         \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
-             vector unsigned short                                             \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
-             vector unsigned int                                               \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
-             vector unsigned long long                                         \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)),  \
-             vector unsigned __int128                                          \
-           : __builtin_vsx_xxeval((vector unsigned long long)(__a),            \
-                                  (vector unsigned long long)(__b),            \
-                                  (vector unsigned long long)(__c), (__imm)))
-#endif /* __VSX__ */
-
-/* vec_genpcvm */
-
-#ifdef __VSX__
-#define vec_genpcvm(__a, __imm)                                                \
-  _Generic((__a), vector unsigned char                                         \
-           : __builtin_vsx_xxgenpcvbm((__a), (int)(__imm)),                    \
-             vector unsigned short                                             \
-           : __builtin_vsx_xxgenpcvhm((__a), (int)(__imm)),                    \
-             vector unsigned int                                               \
-           : __builtin_vsx_xxgenpcvwm((__a), (int)(__imm)),                    \
-             vector unsigned long long                                         \
-           : __builtin_vsx_xxgenpcvdm((__a), (int)(__imm)))
-#endif /* __VSX__ */
-
-/* vec_clrl */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_clrl(vector signed char __a, unsigned int __n) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrrb(__a, __n);
-#else
-  return __builtin_altivec_vclrlb(__a, __n);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_clrl(vector unsigned char __a, unsigned int __n) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrrb((vector signed char)__a, __n);
-#else
-  return __builtin_altivec_vclrlb((vector signed char)__a, __n);
-#endif
-}
-
-/* vec_clrr */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_clrr(vector signed char __a, unsigned int __n) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrlb(__a, __n);
-#else
-  return __builtin_altivec_vclrrb(__a, __n);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_clrr(vector unsigned char __a, unsigned int __n) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vclrlb((vector signed char)__a, __n);
-#else
-  return __builtin_altivec_vclrrb((vector signed char)__a, __n);
-#endif
-}
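-
-/* Note on vec_clrl/vec_clrr above: element order within the register is
-   reversed on little-endian targets, so clearing "leftmost" bytes is
-   implemented with the clear-rightmost builtin (vclrrb) there, and vice
-   versa.  The same left/right swap recurs in vec_insertl/vec_inserth
-   below. */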
-
-/* vec_cntlzm */
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_cntlzm(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vclzdm(__a, __b);
-}
-
-/* vec_cnttzm */
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_cnttzm(vector unsigned long long __a, vector unsigned long long __b) {
-  return __builtin_altivec_vctzdm(__a, __b);
-}
-
-/* vec_sldb */
-
-#define vec_sldb(__a, __b, __c) __builtin_altivec_vsldbi(__a, __b, ((__c) & 0x7))
-
-/* vec_srdb */
-
-#define vec_srdb(__a, __b, __c) __builtin_altivec_vsrdbi(__a, __b, ((__c) & 0x7))
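-
-/* Sketch of the shift-double semantics, as implied by the vsldbi/vsrdbi
-   builtins: vec_sldb yields the high 128 bits of the 256-bit
-   concatenation (__a : __b) shifted left by (__c & 0x7) bits, and
-   vec_srdb yields the low 128 bits of the concatenation shifted right by
-   the same amount. */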
-
-/* vec_insertl */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_insertl(unsigned char __a, vector unsigned char __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinsbrx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinsblx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_insertl(unsigned short __a, vector unsigned short __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinshrx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinshlx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_insertl(unsigned int __a, vector unsigned int __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinswrx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinswlx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_insertl(unsigned long long __a, vector unsigned long long __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinsdrx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinsdlx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_insertl(vector unsigned char __a, vector unsigned char __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinsbvrx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinsbvlx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_insertl(vector unsigned short __a, vector unsigned short __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinshvrx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinshvlx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_insertl(vector unsigned int __a, vector unsigned int __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinswvrx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinswvlx(__b, __c, __a);
-#endif
-}
-
-/* vec_inserth */
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_inserth(unsigned char __a, vector unsigned char __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinsblx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinsbrx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_inserth(unsigned short __a, vector unsigned short __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinshlx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinshrx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_inserth(unsigned int __a, vector unsigned int __b, unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinswlx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinswrx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_inserth(unsigned long long __a, vector unsigned long long __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinsdlx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinsdrx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_inserth(vector unsigned char __a, vector unsigned char __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinsbvlx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinsbvrx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_inserth(vector unsigned short __a, vector unsigned short __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinshvlx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinshvrx(__b, __c, __a);
-#endif
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_inserth(vector unsigned int __a, vector unsigned int __b,
-            unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  return __builtin_altivec_vinswvlx(__b, __c, __a);
-#else
-  return __builtin_altivec_vinswvrx(__b, __c, __a);
-#endif
-}
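-
-/* vec_insertl and vec_inserth differ only in which end of the vector the
-   byte offset __c is counted from: the two families pick opposite
-   left/right-index builtins, and each swaps its choice on little-endian
-   targets so that the offset means the same thing on either endianness. */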
-
-#ifdef __VSX__
-
-/* vec_permx */
-
-#define vec_permx(__a, __b, __c, __d)                                          \
-  __builtin_vsx_xxpermx((__a), (__b), (__c), (__d))
-
-/* vec_blendv */
-
-static __inline__ vector signed char __ATTRS_o_ai
-vec_blendv(vector signed char __a, vector signed char __b,
-           vector unsigned char __c) {
-  return __builtin_vsx_xxblendvb(__a, __b, __c);
-}
-
-static __inline__ vector unsigned char __ATTRS_o_ai
-vec_blendv(vector unsigned char __a, vector unsigned char __b,
-           vector unsigned char __c) {
-  return __builtin_vsx_xxblendvb(__a, __b, __c);
-}
-
-static __inline__ vector signed short __ATTRS_o_ai
-vec_blendv(vector signed short __a, vector signed short __b,
-           vector unsigned short __c) {
-  return __builtin_vsx_xxblendvh(__a, __b, __c);
-}
-
-static __inline__ vector unsigned short __ATTRS_o_ai
-vec_blendv(vector unsigned short __a, vector unsigned short __b,
-           vector unsigned short __c) {
-  return __builtin_vsx_xxblendvh(__a, __b, __c);
-}
-
-static __inline__ vector signed int __ATTRS_o_ai
-vec_blendv(vector signed int __a, vector signed int __b,
-           vector unsigned int __c) {
-  return __builtin_vsx_xxblendvw(__a, __b, __c);
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai
-vec_blendv(vector unsigned int __a, vector unsigned int __b,
-           vector unsigned int __c) {
-  return __builtin_vsx_xxblendvw(__a, __b, __c);
-}
-
-static __inline__ vector signed long long __ATTRS_o_ai
-vec_blendv(vector signed long long __a, vector signed long long __b,
-           vector unsigned long long __c) {
-  return __builtin_vsx_xxblendvd(__a, __b, __c);
-}
-
-static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_blendv(vector unsigned long long __a, vector unsigned long long __b,
-           vector unsigned long long __c) {
-  return __builtin_vsx_xxblendvd(__a, __b, __c);
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_blendv(vector float __a, vector float __b, vector unsigned int __c) {
-  return __builtin_vsx_xxblendvw(__a, __b, __c);
-}
-
-static __inline__ vector double __ATTRS_o_ai
-vec_blendv(vector double __a, vector double __b,
-           vector unsigned long long __c) {
-  return __builtin_vsx_xxblendvd(__a, __b, __c);
-}
-
-/* vec_splati */
-
-#define vec_splati(__a)                                                        \
-  _Generic((__a), signed int                                                   \
-           : ((vector signed int)(__a)), unsigned int                          \
-           : ((vector unsigned int)(__a)), float                               \
-           : ((vector float)(__a)))
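-
-/* vec_splati relies on the AltiVec-style scalar-to-vector cast, which
-   replicates the scalar into every element.  Usage sketch:
-
-     vector float __ones = vec_splati(1.0f);  // {1.0f, 1.0f, 1.0f, 1.0f}
-*/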
-
-/* vec_splatid */
-
-static __inline__ vector double __ATTRS_o_ai vec_splatid(const float __a) {
-  return ((vector double)((double)__a));
-}
-
-/* vec_splati_ins */
-
-static __inline__ vector signed int __ATTRS_o_ai vec_splati_ins(
-    vector signed int __a, const unsigned int __b, const signed int __c) {
-#ifdef __LITTLE_ENDIAN__
-  __a[1 - __b] = __c;
-  __a[3 - __b] = __c;
-#else
-  __a[__b] = __c;
-  __a[2 + __b] = __c;
-#endif
-  return __a;
-}
-
-static __inline__ vector unsigned int __ATTRS_o_ai vec_splati_ins(
-    vector unsigned int __a, const unsigned int __b, const unsigned int __c) {
-#ifdef __LITTLE_ENDIAN__
-  __a[1 - __b] = __c;
-  __a[3 - __b] = __c;
-#else
-  __a[__b] = __c;
-  __a[2 + __b] = __c;
-#endif
-  return __a;
-}
-
-static __inline__ vector float __ATTRS_o_ai
-vec_splati_ins(vector float __a, const unsigned int __b, const float __c) {
-#ifdef __LITTLE_ENDIAN__
-  __a[1 - __b] = __c;
-  __a[3 - __b] = __c;
-#else
-  __a[__b] = __c;
-  __a[2 + __b] = __c;
-#endif
-  return __a;
-}
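-
-/* vec_splati_ins writes __c into lane __b (0 or 1) of each 64-bit half of
-   __a, i.e. lanes __b and __b + 2 in big-endian numbering; the index
-   arithmetic above expresses the same positions in little-endian lane
-   order.  Sketch:
-
-     vector signed int __v = {0, 0, 0, 0};
-     __v = vec_splati_ins(__v, 1, 7);  // big-endian lanes 1 and 3 become 7
-*/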
-#endif /* __VSX__ */
-#endif /* __POWER10_VECTOR__ */
-
-#undef __ATTRS_o_ai
-
-#endif /* __ALTIVEC_H */
diff --git a/linux-x86/lib64/clang/11.0.5/include/arm_neon.h b/linux-x86/lib64/clang/11.0.5/include/arm_neon.h
deleted file mode 100644
index da1e17c..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/arm_neon.h
+++ /dev/null
@@ -1,67312 +0,0 @@
-/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------===
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __ARM_NEON_H
-#define __ARM_NEON_H
-
-#ifndef __ARM_FP
-#error "NEON intrinsics not available with the soft-float ABI. Please use -mfloat-abi=softfp or -mfloat-abi=hard"
-#else
-
-#if !defined(__ARM_NEON)
-#error "NEON support not enabled"
-#else
-
-#include <stdint.h>
-
-#ifdef __ARM_FEATURE_BF16
-#include <arm_bf16.h>
-typedef __bf16 bfloat16_t;
-#endif
-
-typedef float float32_t;
-typedef __fp16 float16_t;
-#ifdef __aarch64__
-typedef double float64_t;
-#endif
-
-#ifdef __aarch64__
-typedef uint8_t poly8_t;
-typedef uint16_t poly16_t;
-typedef uint64_t poly64_t;
-typedef __uint128_t poly128_t;
-#else
-typedef int8_t poly8_t;
-typedef int16_t poly16_t;
-typedef int64_t poly64_t;
-#endif
-typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t;
-typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t;
-typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t;
-typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t;
-typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t;
-typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t;
-typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t;
-typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t;
-typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t;
-typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t;
-typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t;
-typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t;
-typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t;
-typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t;
-typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t;
-typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t;
-typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t;
-typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t;
-typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t;
-typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t;
-#ifdef __aarch64__
-typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t;
-typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t;
-#endif
-typedef __attribute__((neon_polyvector_type(8))) poly8_t poly8x8_t;
-typedef __attribute__((neon_polyvector_type(16))) poly8_t poly8x16_t;
-typedef __attribute__((neon_polyvector_type(4))) poly16_t poly16x4_t;
-typedef __attribute__((neon_polyvector_type(8))) poly16_t poly16x8_t;
-typedef __attribute__((neon_polyvector_type(1))) poly64_t poly64x1_t;
-typedef __attribute__((neon_polyvector_type(2))) poly64_t poly64x2_t;
-
-typedef struct int8x8x2_t {
-  int8x8_t val[2];
-} int8x8x2_t;
-
-typedef struct int8x16x2_t {
-  int8x16_t val[2];
-} int8x16x2_t;
-
-typedef struct int16x4x2_t {
-  int16x4_t val[2];
-} int16x4x2_t;
-
-typedef struct int16x8x2_t {
-  int16x8_t val[2];
-} int16x8x2_t;
-
-typedef struct int32x2x2_t {
-  int32x2_t val[2];
-} int32x2x2_t;
-
-typedef struct int32x4x2_t {
-  int32x4_t val[2];
-} int32x4x2_t;
-
-typedef struct int64x1x2_t {
-  int64x1_t val[2];
-} int64x1x2_t;
-
-typedef struct int64x2x2_t {
-  int64x2_t val[2];
-} int64x2x2_t;
-
-typedef struct uint8x8x2_t {
-  uint8x8_t val[2];
-} uint8x8x2_t;
-
-typedef struct uint8x16x2_t {
-  uint8x16_t val[2];
-} uint8x16x2_t;
-
-typedef struct uint16x4x2_t {
-  uint16x4_t val[2];
-} uint16x4x2_t;
-
-typedef struct uint16x8x2_t {
-  uint16x8_t val[2];
-} uint16x8x2_t;
-
-typedef struct uint32x2x2_t {
-  uint32x2_t val[2];
-} uint32x2x2_t;
-
-typedef struct uint32x4x2_t {
-  uint32x4_t val[2];
-} uint32x4x2_t;
-
-typedef struct uint64x1x2_t {
-  uint64x1_t val[2];
-} uint64x1x2_t;
-
-typedef struct uint64x2x2_t {
-  uint64x2_t val[2];
-} uint64x2x2_t;
-
-typedef struct float16x4x2_t {
-  float16x4_t val[2];
-} float16x4x2_t;
-
-typedef struct float16x8x2_t {
-  float16x8_t val[2];
-} float16x8x2_t;
-
-typedef struct float32x2x2_t {
-  float32x2_t val[2];
-} float32x2x2_t;
-
-typedef struct float32x4x2_t {
-  float32x4_t val[2];
-} float32x4x2_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x2_t {
-  float64x1_t val[2];
-} float64x1x2_t;
-
-typedef struct float64x2x2_t {
-  float64x2_t val[2];
-} float64x2x2_t;
-
-#endif
-typedef struct poly8x8x2_t {
-  poly8x8_t val[2];
-} poly8x8x2_t;
-
-typedef struct poly8x16x2_t {
-  poly8x16_t val[2];
-} poly8x16x2_t;
-
-typedef struct poly16x4x2_t {
-  poly16x4_t val[2];
-} poly16x4x2_t;
-
-typedef struct poly16x8x2_t {
-  poly16x8_t val[2];
-} poly16x8x2_t;
-
-typedef struct poly64x1x2_t {
-  poly64x1_t val[2];
-} poly64x1x2_t;
-
-typedef struct poly64x2x2_t {
-  poly64x2_t val[2];
-} poly64x2x2_t;
-
-typedef struct int8x8x3_t {
-  int8x8_t val[3];
-} int8x8x3_t;
-
-typedef struct int8x16x3_t {
-  int8x16_t val[3];
-} int8x16x3_t;
-
-typedef struct int16x4x3_t {
-  int16x4_t val[3];
-} int16x4x3_t;
-
-typedef struct int16x8x3_t {
-  int16x8_t val[3];
-} int16x8x3_t;
-
-typedef struct int32x2x3_t {
-  int32x2_t val[3];
-} int32x2x3_t;
-
-typedef struct int32x4x3_t {
-  int32x4_t val[3];
-} int32x4x3_t;
-
-typedef struct int64x1x3_t {
-  int64x1_t val[3];
-} int64x1x3_t;
-
-typedef struct int64x2x3_t {
-  int64x2_t val[3];
-} int64x2x3_t;
-
-typedef struct uint8x8x3_t {
-  uint8x8_t val[3];
-} uint8x8x3_t;
-
-typedef struct uint8x16x3_t {
-  uint8x16_t val[3];
-} uint8x16x3_t;
-
-typedef struct uint16x4x3_t {
-  uint16x4_t val[3];
-} uint16x4x3_t;
-
-typedef struct uint16x8x3_t {
-  uint16x8_t val[3];
-} uint16x8x3_t;
-
-typedef struct uint32x2x3_t {
-  uint32x2_t val[3];
-} uint32x2x3_t;
-
-typedef struct uint32x4x3_t {
-  uint32x4_t val[3];
-} uint32x4x3_t;
-
-typedef struct uint64x1x3_t {
-  uint64x1_t val[3];
-} uint64x1x3_t;
-
-typedef struct uint64x2x3_t {
-  uint64x2_t val[3];
-} uint64x2x3_t;
-
-typedef struct float16x4x3_t {
-  float16x4_t val[3];
-} float16x4x3_t;
-
-typedef struct float16x8x3_t {
-  float16x8_t val[3];
-} float16x8x3_t;
-
-typedef struct float32x2x3_t {
-  float32x2_t val[3];
-} float32x2x3_t;
-
-typedef struct float32x4x3_t {
-  float32x4_t val[3];
-} float32x4x3_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x3_t {
-  float64x1_t val[3];
-} float64x1x3_t;
-
-typedef struct float64x2x3_t {
-  float64x2_t val[3];
-} float64x2x3_t;
-
-#endif
-typedef struct poly8x8x3_t {
-  poly8x8_t val[3];
-} poly8x8x3_t;
-
-typedef struct poly8x16x3_t {
-  poly8x16_t val[3];
-} poly8x16x3_t;
-
-typedef struct poly16x4x3_t {
-  poly16x4_t val[3];
-} poly16x4x3_t;
-
-typedef struct poly16x8x3_t {
-  poly16x8_t val[3];
-} poly16x8x3_t;
-
-typedef struct poly64x1x3_t {
-  poly64x1_t val[3];
-} poly64x1x3_t;
-
-typedef struct poly64x2x3_t {
-  poly64x2_t val[3];
-} poly64x2x3_t;
-
-typedef struct int8x8x4_t {
-  int8x8_t val[4];
-} int8x8x4_t;
-
-typedef struct int8x16x4_t {
-  int8x16_t val[4];
-} int8x16x4_t;
-
-typedef struct int16x4x4_t {
-  int16x4_t val[4];
-} int16x4x4_t;
-
-typedef struct int16x8x4_t {
-  int16x8_t val[4];
-} int16x8x4_t;
-
-typedef struct int32x2x4_t {
-  int32x2_t val[4];
-} int32x2x4_t;
-
-typedef struct int32x4x4_t {
-  int32x4_t val[4];
-} int32x4x4_t;
-
-typedef struct int64x1x4_t {
-  int64x1_t val[4];
-} int64x1x4_t;
-
-typedef struct int64x2x4_t {
-  int64x2_t val[4];
-} int64x2x4_t;
-
-typedef struct uint8x8x4_t {
-  uint8x8_t val[4];
-} uint8x8x4_t;
-
-typedef struct uint8x16x4_t {
-  uint8x16_t val[4];
-} uint8x16x4_t;
-
-typedef struct uint16x4x4_t {
-  uint16x4_t val[4];
-} uint16x4x4_t;
-
-typedef struct uint16x8x4_t {
-  uint16x8_t val[4];
-} uint16x8x4_t;
-
-typedef struct uint32x2x4_t {
-  uint32x2_t val[4];
-} uint32x2x4_t;
-
-typedef struct uint32x4x4_t {
-  uint32x4_t val[4];
-} uint32x4x4_t;
-
-typedef struct uint64x1x4_t {
-  uint64x1_t val[4];
-} uint64x1x4_t;
-
-typedef struct uint64x2x4_t {
-  uint64x2_t val[4];
-} uint64x2x4_t;
-
-typedef struct float16x4x4_t {
-  float16x4_t val[4];
-} float16x4x4_t;
-
-typedef struct float16x8x4_t {
-  float16x8_t val[4];
-} float16x8x4_t;
-
-typedef struct float32x2x4_t {
-  float32x2_t val[4];
-} float32x2x4_t;
-
-typedef struct float32x4x4_t {
-  float32x4_t val[4];
-} float32x4x4_t;
-
-#ifdef __aarch64__
-typedef struct float64x1x4_t {
-  float64x1_t val[4];
-} float64x1x4_t;
-
-typedef struct float64x2x4_t {
-  float64x2_t val[4];
-} float64x2x4_t;
-
-#endif
-typedef struct poly8x8x4_t {
-  poly8x8_t val[4];
-} poly8x8x4_t;
-
-typedef struct poly8x16x4_t {
-  poly8x16_t val[4];
-} poly8x16x4_t;
-
-typedef struct poly16x4x4_t {
-  poly16x4_t val[4];
-} poly16x4x4_t;
-
-typedef struct poly16x8x4_t {
-  poly16x8_t val[4];
-} poly16x8x4_t;
-
-typedef struct poly64x1x4_t {
-  poly64x1_t val[4];
-} poly64x1x4_t;
-
-typedef struct poly64x2x4_t {
-  poly64x2_t val[4];
-} poly64x2x4_t;
-
-#ifdef __ARM_FEATURE_BF16
-typedef __attribute__((neon_vector_type(4))) bfloat16_t bfloat16x4_t;
-typedef __attribute__((neon_vector_type(8))) bfloat16_t bfloat16x8_t;
-
-typedef struct bfloat16x4x2_t {
-  bfloat16x4_t val[2];
-} bfloat16x4x2_t;
-
-typedef struct bfloat16x8x2_t {
-  bfloat16x8_t val[2];
-} bfloat16x8x2_t;
-
-typedef struct bfloat16x4x3_t {
-  bfloat16x4_t val[3];
-} bfloat16x4x3_t;
-
-typedef struct bfloat16x8x3_t {
-  bfloat16x8_t val[3];
-} bfloat16x8x3_t;
-
-typedef struct bfloat16x4x4_t {
-  bfloat16x4_t val[4];
-} bfloat16x4x4_t;
-
-typedef struct bfloat16x8x4_t {
-  bfloat16x8_t val[4];
-} bfloat16x8x4_t;
-
-#endif
-
-#define __ai static __inline__ __attribute__((__always_inline__, __nodebug__))
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \
-  __ret; \
-})
-#else
-#define splat_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \
-  __ret; \
-})
-#endif
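-
-/* The pattern above repeats for every lane intrinsic in this header: on
-   big-endian targets the operand is first reversed into little-endian
-   lane order with __builtin_shufflevector, the builtin (which assumes
-   little-endian lane numbering) is applied, and the result is reversed
-   back.  The __noswap_ variants omit both reversals so that one swapped
-   macro can be implemented in terms of another without double-reversing. */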
-
-#define splat_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \
-  __ret; \
-})
-#else
-#define splat_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \
-  __ret; \
-})
-#else
-#define splatq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
-  __ret; \
-})
-#else
-#define splatq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \
-  __ret; \
-})
-#else
-#define splatq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define splatq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define splatq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#else
-#define splatq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define splatq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define splatq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
-  __ret; \
-})
-#else
-#define splatq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \
-  __ret; \
-})
-#else
-#define splatq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \
-  __ret; \
-})
-#else
-#define splatq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define splatq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#else
-#define splatq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define splatq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define splat_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define splat_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#define splat_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define splat_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define splat_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#define splat_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \
-  __ret; \
-})
-#else
-#define splat_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \
-  __ret; \
-})
-#else
-#define splat_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define splat_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#define splat_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define splat_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \
-  __ret; \
-})
-#else
-#define splat_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \
-  __ret; \
-})
-#else
-#define splat_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 38); \
-  __ret; \
-})
-#define __noswap_splat_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \
-  __ret; \
-})
-#else
-#define splat_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \
-  __ret; \
-})
-#else
-#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \
-  __ret; \
-})
-#else
-#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \
-  __ret; \
-})
-#else
-#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \
-  __ret; \
-})
-#else
-#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \
-  __ret; \
-})
-#else
-#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \
-  __ret; \
-})
-#else
-#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define splat_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define splat_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define splat_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 51); \
-  __ret; \
-})
-#define __noswap_splat_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define splat_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define splat_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \
-  __ret; \
-})
-#else
-#define splat_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 42); \
-  __ret; \
-})
-#define __noswap_splat_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \
-  __ret; \
-})
-#else
-#define splat_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \
-  __ret; \
-})
-#else
-#define splat_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define splat_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define splat_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 35); \
-  __ret; \
-})
-#define __noswap_splat_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define splat_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vabsq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vabsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vabsq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vabsq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabsq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vabsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabsq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vabsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vabs_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vabs_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vabs_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vabs_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vabs_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vabs_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vabs_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vabs_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 & __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 & __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 & ~__p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 & ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5);
-  return __ret;
-}
-#else
-__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37);
-  return __ret;
-}
-#else
-__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
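
The vcle/vcleq definitions above implement a lanewise `<=`: each true lane becomes all-ones (0xFF, 0xFFFF, or 0xFFFFFFFF) and each false lane all-zeros, and on big-endian targets both operands and the result are lane-reversed with `__builtin_shufflevector` so the architectural lane numbering is preserved. A minimal usage sketch (the helper name `select_le` is ours, not part of the header) feeding the mask into a bitwise select:

#include <arm_neon.h>

/* Sketch: keep lanes of v that are <= limit, replace the rest with limit
 * (a lanewise minimum written via compare-and-select). */
uint8x8_t select_le(uint8x8_t v, uint8x8_t limit) {
  uint8x8_t mask = vcle_u8(v, limit);  /* 0xFF where v <= limit, else 0x00 */
  return vbsl_u8(mask, v, limit);      /* bitwise: mask ? v : limit */
}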
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vclsq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vclsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vclsq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vclsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vclsq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vclsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vcls_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vcls_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcls_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcls_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcls_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcls_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
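
The vcls/vclsq intrinsics count leading sign bits: per lane, the number of bits directly below the sign bit that duplicate it, which is also how far the lane can be shifted left without overflow. A sketch (the name `max_left_shift` is ours):

#include <arm_neon.h>

/* Sketch: per-lane count of redundant sign bits, 0..15 for int16;
 * e.g. 0x0001 -> 14, while 0x0000 and 0xFFFF both -> 15. */
int16x4_t max_left_shift(int16x4_t v) {
  return vcls_s16(v);
}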
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
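
The vclt/vcltq family is the strict `<` counterpart with the same all-ones/all-zeros mask convention. A sketch (the helper `count_below` is ours) that counts lanes below a scalar threshold by negating the -1 mask lanes and summing horizontally:

#include <arm_neon.h>

/* Sketch: number of lanes of v strictly below t. */
unsigned count_below(int32x4_t v, int32_t t) {
  uint32x4_t m = vcltq_s32(v, vdupq_n_s32(t));     /* all-ones where v < t */
  int32x4_t ones = vnegq_s32(vreinterpretq_s32_u32(m)); /* -1 lanes -> +1 */
  int32x2_t s = vadd_s32(vget_low_s32(ones), vget_high_s32(ones));
  s = vpadd_s32(s, s);                             /* horizontal sum */
  return (unsigned)vget_lane_s32(s, 0);
}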
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vclzq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclzq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclzq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vclzq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vclzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vclzq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vclzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vclzq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vclzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclz_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclz_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclz_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclz_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclz_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclz_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vclz_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vclz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vclz_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vclz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vclz_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vclz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
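
The vclz/vclzq intrinsics count leading zero bits per lane. A sketch (the name `ilog2q_u32` is ours) using it for a per-lane integer log2:

#include <arm_neon.h>

/* Sketch: floor(log2(x)) per lane as 31 - clz(x); only meaningful for
 * nonzero inputs (a zero lane would wrap to 0xFFFFFFFF). */
uint32x4_t ilog2q_u32(uint32x4_t v) {
  return vsubq_u32(vdupq_n_u32(31), vclzq_u32(v));
}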
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vcnt_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vcntq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcntq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vcntq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vcntq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcnt_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vcnt_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vcnt_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
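
The vcnt/vcntq intrinsics return a per-byte population count, the only element width VCNT supports; wider popcounts are composed with pairwise widening adds, as in this sketch (the name `popcount_u32` is ours):

#include <arm_neon.h>

/* Sketch: population count per 32-bit lane, built from the byte-wise vcnt. */
uint32x2_t popcount_u32(uint32x2_t v) {
  uint8x8_t b = vcnt_u8(vreinterpret_u8_u32(v));  /* popcount per byte */
  uint16x4_t h = vpaddl_u8(b);                    /* sum byte pairs -> u16 */
  return vpaddl_u16(h);                           /* sum halfword pairs -> u32 */
}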
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
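
vcombine concatenates two 64-bit vectors into one 128-bit vector, with the first argument in the low lanes; the `__noswap_` variants appear to exist so that other big-endian definitions in this header can combine already lane-reversed halves without reversing again. A sketch (the helper `load_pair` is ours):

#include <arm_neon.h>

/* Sketch: build one q-register from two independently loaded halves;
 * bytes from a land in lanes 0..7, bytes from b in lanes 8..15. */
uint8x16_t load_pair(const uint8_t *a, const uint8_t *b) {
  return vcombine_u8(vld1_u8(a), vld1_u8(b));
}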
-
-#define vcreate_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (poly8x8_t)(__promote); \
-  __ret; \
-})
-#define vcreate_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (poly16x4_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint8x8_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint32x2_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u64(__p0) __extension__ ({ \
-  uint64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint64x1_t)(__promote); \
-  __ret; \
-})
-#define vcreate_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (uint16x4_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int8x8_t)(__promote); \
-  __ret; \
-})
-#define vcreate_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (float32x2_t)(__promote); \
-  __ret; \
-})
-#define vcreate_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (float16x4_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int32x2_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s64(__p0) __extension__ ({ \
-  int64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int64x1_t)(__promote); \
-  __ret; \
-})
-#define vcreate_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (int16x4_t)(__promote); \
-  __ret; \
-})
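The vcreate_* macros above reinterpret a 64-bit integer bit pattern as a 64-bit vector; no per-lane conversion happens, so the lane layout follows the target's byte order. A sketch assuming a little-endian target (the name `iota_bytes` is ours):

#include <arm_neon.h>

/* Sketch: on little-endian, lane 0 takes the least significant byte,
 * so this yields the ascending byte sequence 0,1,2,...,7. */
uint8x8_t iota_bytes(void) {
  return vcreate_u8(0x0706050403020100ULL);
}
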
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
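
The vcvt family converts between float32 and (u)int32 lanes, and the `_n_` variants take a fraction-bit immediate (1..32) that folds the power-of-two scaling into the conversion, so fixed-point round trips need no separate multiply. A sketch for a Q16.16 format (the helper names are ours):

#include <arm_neon.h>

/* Sketch: Q16.16 fixed point <-> float; the immediate names the
 * number of fraction bits. */
float32x2_t fixed_to_float(int32x2_t q16) {
  return vcvt_n_f32_s32(q16, 16);
}
int32x2_t float_to_fixed(float32x2_t f) {
  return vcvt_n_s32_f32(f, 16);  /* truncates toward zero */
}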
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_p8(__p0_0, __p1_0) __extension__ ({ \
-  poly8x8_t __s0_0 = __p0_0; \
-  poly8x8_t __ret_0; \
-  __ret_0 = splat_lane_p8(__s0_0, __p1_0); \
-  __ret_0; \
-})
-#else
-#define vdup_lane_p8(__p0_1, __p1_1) __extension__ ({ \
-  poly8x8_t __s0_1 = __p0_1; \
-  poly8x8_t __rev0_1;  __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_1; \
-  __ret_1 = __noswap_splat_lane_p8(__rev0_1, __p1_1); \
-  __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_1; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_p16(__p0_2, __p1_2) __extension__ ({ \
-  poly16x4_t __s0_2 = __p0_2; \
-  poly16x4_t __ret_2; \
-  __ret_2 = splat_lane_p16(__s0_2, __p1_2); \
-  __ret_2; \
-})
-#else
-#define vdup_lane_p16(__p0_3, __p1_3) __extension__ ({ \
-  poly16x4_t __s0_3 = __p0_3; \
-  poly16x4_t __rev0_3;  __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 3, 2, 1, 0); \
-  poly16x4_t __ret_3; \
-  __ret_3 = __noswap_splat_lane_p16(__rev0_3, __p1_3); \
-  __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 3, 2, 1, 0); \
-  __ret_3; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p8(__p0_4, __p1_4) __extension__ ({ \
-  poly8x8_t __s0_4 = __p0_4; \
-  poly8x16_t __ret_4; \
-  __ret_4 = splatq_lane_p8(__s0_4, __p1_4); \
-  __ret_4; \
-})
-#else
-#define vdupq_lane_p8(__p0_5, __p1_5) __extension__ ({ \
-  poly8x8_t __s0_5 = __p0_5; \
-  poly8x8_t __rev0_5;  __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_5; \
-  __ret_5 = __noswap_splatq_lane_p8(__rev0_5, __p1_5); \
-  __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_5; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p16(__p0_6, __p1_6) __extension__ ({ \
-  poly16x4_t __s0_6 = __p0_6; \
-  poly16x8_t __ret_6; \
-  __ret_6 = splatq_lane_p16(__s0_6, __p1_6); \
-  __ret_6; \
-})
-#else
-#define vdupq_lane_p16(__p0_7, __p1_7) __extension__ ({ \
-  poly16x4_t __s0_7 = __p0_7; \
-  poly16x4_t __rev0_7;  __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 3, 2, 1, 0); \
-  poly16x8_t __ret_7; \
-  __ret_7 = __noswap_splatq_lane_p16(__rev0_7, __p1_7); \
-  __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_7; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u8(__p0_8, __p1_8) __extension__ ({ \
-  uint8x8_t __s0_8 = __p0_8; \
-  uint8x16_t __ret_8; \
-  __ret_8 = splatq_lane_u8(__s0_8, __p1_8); \
-  __ret_8; \
-})
-#else
-#define vdupq_lane_u8(__p0_9, __p1_9) __extension__ ({ \
-  uint8x8_t __s0_9 = __p0_9; \
-  uint8x8_t __rev0_9;  __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_9; \
-  __ret_9 = __noswap_splatq_lane_u8(__rev0_9, __p1_9); \
-  __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_9; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u32(__p0_10, __p1_10) __extension__ ({ \
-  uint32x2_t __s0_10 = __p0_10; \
-  uint32x4_t __ret_10; \
-  __ret_10 = splatq_lane_u32(__s0_10, __p1_10); \
-  __ret_10; \
-})
-#else
-#define vdupq_lane_u32(__p0_11, __p1_11) __extension__ ({ \
-  uint32x2_t __s0_11 = __p0_11; \
-  uint32x2_t __rev0_11;  __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 1, 0); \
-  uint32x4_t __ret_11; \
-  __ret_11 = __noswap_splatq_lane_u32(__rev0_11, __p1_11); \
-  __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \
-  __ret_11; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u64(__p0_12, __p1_12) __extension__ ({ \
-  uint64x1_t __s0_12 = __p0_12; \
-  uint64x2_t __ret_12; \
-  __ret_12 = splatq_lane_u64(__s0_12, __p1_12); \
-  __ret_12; \
-})
-#else
-#define vdupq_lane_u64(__p0_13, __p1_13) __extension__ ({ \
-  uint64x1_t __s0_13 = __p0_13; \
-  uint64x2_t __ret_13; \
-  __ret_13 = __noswap_splatq_lane_u64(__s0_13, __p1_13); \
-  __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 1, 0); \
-  __ret_13; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_u16(__p0_14, __p1_14) __extension__ ({ \
-  uint16x4_t __s0_14 = __p0_14; \
-  uint16x8_t __ret_14; \
-  __ret_14 = splatq_lane_u16(__s0_14, __p1_14); \
-  __ret_14; \
-})
-#else
-#define vdupq_lane_u16(__p0_15, __p1_15) __extension__ ({ \
-  uint16x4_t __s0_15 = __p0_15; \
-  uint16x4_t __rev0_15;  __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \
-  uint16x8_t __ret_15; \
-  __ret_15 = __noswap_splatq_lane_u16(__rev0_15, __p1_15); \
-  __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_15; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s8(__p0_16, __p1_16) __extension__ ({ \
-  int8x8_t __s0_16 = __p0_16; \
-  int8x16_t __ret_16; \
-  __ret_16 = splatq_lane_s8(__s0_16, __p1_16); \
-  __ret_16; \
-})
-#else
-#define vdupq_lane_s8(__p0_17, __p1_17) __extension__ ({ \
-  int8x8_t __s0_17 = __p0_17; \
-  int8x8_t __rev0_17;  __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_17; \
-  __ret_17 = __noswap_splatq_lane_s8(__rev0_17, __p1_17); \
-  __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_17; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f32(__p0_18, __p1_18) __extension__ ({ \
-  float32x2_t __s0_18 = __p0_18; \
-  float32x4_t __ret_18; \
-  __ret_18 = splatq_lane_f32(__s0_18, __p1_18); \
-  __ret_18; \
-})
-#else
-#define vdupq_lane_f32(__p0_19, __p1_19) __extension__ ({ \
-  float32x2_t __s0_19 = __p0_19; \
-  float32x2_t __rev0_19;  __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \
-  float32x4_t __ret_19; \
-  __ret_19 = __noswap_splatq_lane_f32(__rev0_19, __p1_19); \
-  __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \
-  __ret_19; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s32(__p0_20, __p1_20) __extension__ ({ \
-  int32x2_t __s0_20 = __p0_20; \
-  int32x4_t __ret_20; \
-  __ret_20 = splatq_lane_s32(__s0_20, __p1_20); \
-  __ret_20; \
-})
-#else
-#define vdupq_lane_s32(__p0_21, __p1_21) __extension__ ({ \
-  int32x2_t __s0_21 = __p0_21; \
-  int32x2_t __rev0_21;  __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 1, 0); \
-  int32x4_t __ret_21; \
-  __ret_21 = __noswap_splatq_lane_s32(__rev0_21, __p1_21); \
-  __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 3, 2, 1, 0); \
-  __ret_21; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s64(__p0_22, __p1_22) __extension__ ({ \
-  int64x1_t __s0_22 = __p0_22; \
-  int64x2_t __ret_22; \
-  __ret_22 = splatq_lane_s64(__s0_22, __p1_22); \
-  __ret_22; \
-})
-#else
-#define vdupq_lane_s64(__p0_23, __p1_23) __extension__ ({ \
-  int64x1_t __s0_23 = __p0_23; \
-  int64x2_t __ret_23; \
-  __ret_23 = __noswap_splatq_lane_s64(__s0_23, __p1_23); \
-  __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 1, 0); \
-  __ret_23; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_s16(__p0_24, __p1_24) __extension__ ({ \
-  int16x4_t __s0_24 = __p0_24; \
-  int16x8_t __ret_24; \
-  __ret_24 = splatq_lane_s16(__s0_24, __p1_24); \
-  __ret_24; \
-})
-#else
-#define vdupq_lane_s16(__p0_25, __p1_25) __extension__ ({ \
-  int16x4_t __s0_25 = __p0_25; \
-  int16x4_t __rev0_25;  __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 3, 2, 1, 0); \
-  int16x8_t __ret_25; \
-  __ret_25 = __noswap_splatq_lane_s16(__rev0_25, __p1_25); \
-  __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_25; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u8(__p0_26, __p1_26) __extension__ ({ \
-  uint8x8_t __s0_26 = __p0_26; \
-  uint8x8_t __ret_26; \
-  __ret_26 = splat_lane_u8(__s0_26, __p1_26); \
-  __ret_26; \
-})
-#else
-#define vdup_lane_u8(__p0_27, __p1_27) __extension__ ({ \
-  uint8x8_t __s0_27 = __p0_27; \
-  uint8x8_t __rev0_27;  __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_27; \
-  __ret_27 = __noswap_splat_lane_u8(__rev0_27, __p1_27); \
-  __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_27; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u32(__p0_28, __p1_28) __extension__ ({ \
-  uint32x2_t __s0_28 = __p0_28; \
-  uint32x2_t __ret_28; \
-  __ret_28 = splat_lane_u32(__s0_28, __p1_28); \
-  __ret_28; \
-})
-#else
-#define vdup_lane_u32(__p0_29, __p1_29) __extension__ ({ \
-  uint32x2_t __s0_29 = __p0_29; \
-  uint32x2_t __rev0_29;  __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 1, 0); \
-  uint32x2_t __ret_29; \
-  __ret_29 = __noswap_splat_lane_u32(__rev0_29, __p1_29); \
-  __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 1, 0); \
-  __ret_29; \
-})
-#endif
-
-#define vdup_lane_u64(__p0_30, __p1_30) __extension__ ({ \
-  uint64x1_t __s0_30 = __p0_30; \
-  uint64x1_t __ret_30; \
-  __ret_30 = splat_lane_u64(__s0_30, __p1_30); \
-  __ret_30; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_u16(__p0_31, __p1_31) __extension__ ({ \
-  uint16x4_t __s0_31 = __p0_31; \
-  uint16x4_t __ret_31; \
-  __ret_31 = splat_lane_u16(__s0_31, __p1_31); \
-  __ret_31; \
-})
-#else
-#define vdup_lane_u16(__p0_32, __p1_32) __extension__ ({ \
-  uint16x4_t __s0_32 = __p0_32; \
-  uint16x4_t __rev0_32;  __rev0_32 = __builtin_shufflevector(__s0_32, __s0_32, 3, 2, 1, 0); \
-  uint16x4_t __ret_32; \
-  __ret_32 = __noswap_splat_lane_u16(__rev0_32, __p1_32); \
-  __ret_32 = __builtin_shufflevector(__ret_32, __ret_32, 3, 2, 1, 0); \
-  __ret_32; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s8(__p0_33, __p1_33) __extension__ ({ \
-  int8x8_t __s0_33 = __p0_33; \
-  int8x8_t __ret_33; \
-  __ret_33 = splat_lane_s8(__s0_33, __p1_33); \
-  __ret_33; \
-})
-#else
-#define vdup_lane_s8(__p0_34, __p1_34) __extension__ ({ \
-  int8x8_t __s0_34 = __p0_34; \
-  int8x8_t __rev0_34;  __rev0_34 = __builtin_shufflevector(__s0_34, __s0_34, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_34; \
-  __ret_34 = __noswap_splat_lane_s8(__rev0_34, __p1_34); \
-  __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_34; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f32(__p0_35, __p1_35) __extension__ ({ \
-  float32x2_t __s0_35 = __p0_35; \
-  float32x2_t __ret_35; \
-  __ret_35 = splat_lane_f32(__s0_35, __p1_35); \
-  __ret_35; \
-})
-#else
-#define vdup_lane_f32(__p0_36, __p1_36) __extension__ ({ \
-  float32x2_t __s0_36 = __p0_36; \
-  float32x2_t __rev0_36;  __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 1, 0); \
-  float32x2_t __ret_36; \
-  __ret_36 = __noswap_splat_lane_f32(__rev0_36, __p1_36); \
-  __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 1, 0); \
-  __ret_36; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s32(__p0_37, __p1_37) __extension__ ({ \
-  int32x2_t __s0_37 = __p0_37; \
-  int32x2_t __ret_37; \
-  __ret_37 = splat_lane_s32(__s0_37, __p1_37); \
-  __ret_37; \
-})
-#else
-#define vdup_lane_s32(__p0_38, __p1_38) __extension__ ({ \
-  int32x2_t __s0_38 = __p0_38; \
-  int32x2_t __rev0_38;  __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \
-  int32x2_t __ret_38; \
-  __ret_38 = __noswap_splat_lane_s32(__rev0_38, __p1_38); \
-  __ret_38 = __builtin_shufflevector(__ret_38, __ret_38, 1, 0); \
-  __ret_38; \
-})
-#endif
-
-#define vdup_lane_s64(__p0_39, __p1_39) __extension__ ({ \
-  int64x1_t __s0_39 = __p0_39; \
-  int64x1_t __ret_39; \
-  __ret_39 = splat_lane_s64(__s0_39, __p1_39); \
-  __ret_39; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_s16(__p0_40, __p1_40) __extension__ ({ \
-  int16x4_t __s0_40 = __p0_40; \
-  int16x4_t __ret_40; \
-  __ret_40 = splat_lane_s16(__s0_40, __p1_40); \
-  __ret_40; \
-})
-#else
-#define vdup_lane_s16(__p0_41, __p1_41) __extension__ ({ \
-  int16x4_t __s0_41 = __p0_41; \
-  int16x4_t __rev0_41;  __rev0_41 = __builtin_shufflevector(__s0_41, __s0_41, 3, 2, 1, 0); \
-  int16x4_t __ret_41; \
-  __ret_41 = __noswap_splat_lane_s16(__rev0_41, __p1_41); \
-  __ret_41 = __builtin_shufflevector(__ret_41, __ret_41, 3, 2, 1, 0); \
-  __ret_41; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vdup_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x8_t vdup_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vdup_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x4_t vdup_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x16_t vdupq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x8_t vdupq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x16_t vdupq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x4_t vdupq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint64x2_t vdupq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x8_t vdupq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vdupq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x16_t vdupq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vdupq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x4_t vdupq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vdupq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x4_t vdupq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vdupq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int64x2_t vdupq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vdupq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x8_t vdupq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vdup_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x8_t vdup_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vdup_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x2_t vdup_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vdup_n_u64(uint64_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vdup_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x4_t vdup_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vdup_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x8_t vdup_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vdup_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x2_t vdup_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vdup_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x2_t vdup_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vdup_n_s64(int64_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vdup_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x4_t vdup_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 ^ __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 ^ __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vext_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vext_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \
-  __ret; \
-})
-#else
-#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vext_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vext_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vext_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vext_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \
-  __ret; \
-})
-#else
-#define vext_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vext_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vext_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vget_high_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vget_high_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vget_high_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vget_high_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai uint64x1_t vget_high_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vget_high_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vget_high_s8(int8x16_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vget_high_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vget_high_f32(float32x4_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vget_high_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vget_high_f16(float16x8_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vget_high_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vget_high_s32(int32x4_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vget_high_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vget_high_s64(int64x2_t __p0) {
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai int64x1_t vget_high_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vget_high_s16(int16x8_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vget_high_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai poly8x8_t vget_low_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai poly16x4_t vget_low_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai uint8x8_t vget_low_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
-  return __ret;
-}
-#else
-__ai uint32x2_t vget_low_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai uint64x1_t vget_low_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai uint16x4_t vget_low_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vget_low_s8(int8x16_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai int8x8_t vget_low_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vget_low_f32(float32x4_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
-  return __ret;
-}
-#else
-__ai float32x2_t vget_low_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vget_low_f16(float16x8_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai float16x4_t vget_low_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vget_low_s32(int32x4_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1);
-  return __ret;
-}
-#else
-__ai int32x2_t vget_low_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vget_low_s64(int64x2_t __p0) {
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai int64x1_t vget_low_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vget_low_s16(int16x8_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai int16x4_t vget_low_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64(__p0) __extension__ ({ \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64(__p0) __extension__ ({ \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_dup_p8(__p0) __extension__ ({ \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_dup_p16(__p0) __extension__ ({ \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p8(__p0) __extension__ ({ \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p16(__p0) __extension__ ({ \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u8(__p0) __extension__ ({ \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u32(__p0) __extension__ ({ \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u64(__p0) __extension__ ({ \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_dup_u16(__p0) __extension__ ({ \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s8(__p0) __extension__ ({ \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f32(__p0) __extension__ ({ \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s32(__p0) __extension__ ({ \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s64(__p0) __extension__ ({ \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_dup_s16(__p0) __extension__ ({ \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_dup_u8(__p0) __extension__ ({ \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_dup_u32(__p0) __extension__ ({ \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_u64(__p0) __extension__ ({ \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_dup_u16(__p0) __extension__ ({ \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_dup_s8(__p0) __extension__ ({ \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_dup_f32(__p0) __extension__ ({ \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_dup_s32(__p0) __extension__ ({ \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_s64(__p0) __extension__ ({ \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_dup_s16(__p0) __extension__ ({ \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
-  __ret; \
-})
-#else
-#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8_x2(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8_x2(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16_x2(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16_x2(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8_x2(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8_x2(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16_x2(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16_x2(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8_x2(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8_x2(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32_x2(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32_x2(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64_x2(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64_x2(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16_x2(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16_x2(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8_x2(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8_x2(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32_x2(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32_x2(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32_x2(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32_x2(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64_x2(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64_x2(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16_x2(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16_x2(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8_x2(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8_x2(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32_x2(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32_x2(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64_x2(__p0) __extension__ ({ \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16_x2(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16_x2(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8_x2(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8_x2(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32_x2(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32_x2(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32_x2(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32_x2(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64_x2(__p0) __extension__ ({ \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16_x2(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16_x2(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8_x3(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8_x3(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16_x3(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16_x3(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8_x3(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8_x3(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16_x3(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16_x3(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8_x3(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8_x3(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32_x3(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32_x3(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64_x3(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64_x3(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16_x3(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16_x3(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8_x3(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8_x3(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32_x3(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32_x3(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32_x3(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32_x3(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64_x3(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64_x3(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16_x3(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16_x3(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8_x3(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8_x3(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32_x3(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32_x3(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64_x3(__p0) __extension__ ({ \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16_x3(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16_x3(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8_x3(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8_x3(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32_x3(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32_x3(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32_x3(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32_x3(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64_x3(__p0) __extension__ ({ \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16_x3(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16_x3(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p8_x4(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld1_p8_x4(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_p16_x4(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld1_p16_x4(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p8_x4(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld1q_p8_x4(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p16_x4(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld1q_p16_x4(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u8_x4(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld1q_u8_x4(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u32_x4(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld1q_u32_x4(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u64_x4(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld1q_u64_x4(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_u16_x4(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld1q_u16_x4(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s8_x4(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld1q_s8_x4(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f32_x4(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld1q_f32_x4(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s32_x4(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld1q_s32_x4(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s64_x4(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld1q_s64_x4(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_s16_x4(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld1q_s16_x4(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u8_x4(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld1_u8_x4(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u32_x4(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld1_u32_x4(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_u64_x4(__p0) __extension__ ({ \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_u16_x4(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld1_u16_x4(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s8_x4(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld1_s8_x4(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f32_x4(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld1_f32_x4(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s32_x4(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld1_s32_x4(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_s64_x4(__p0) __extension__ ({ \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1_s16_x4(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld1_s16_x4(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld2_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld2_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld2q_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld2q_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld2q_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld2q_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld2q_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld2q_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld2q_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld2q_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld2q_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld2_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld2_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_u64(__p0) __extension__ ({ \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld2_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld2_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld2_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld2_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_s64(__p0) __extension__ ({ \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld2_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld2_dup_p8(__p0) __extension__ ({ \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld2_dup_p16(__p0) __extension__ ({ \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld2q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s8(__p0) __extension__ ({ \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f32(__p0) __extension__ ({ \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s32(__p0) __extension__ ({ \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld2q_dup_s16(__p0) __extension__ ({ \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld2_dup_u8(__p0) __extension__ ({ \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld2_dup_u32(__p0) __extension__ ({ \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_u64(__p0) __extension__ ({ \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld2_dup_u16(__p0) __extension__ ({ \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld2_dup_s8(__p0) __extension__ ({ \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld2_dup_f32(__p0) __extension__ ({ \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld2_dup_s32(__p0) __extension__ ({ \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_s64(__p0) __extension__ ({ \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld2_dup_s16(__p0) __extension__ ({ \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
-  __ret; \
-})
-#else
-#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
-  __ret; \
-})
-#else
-#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  poly16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  uint32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  float32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  int32x4x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
-  __ret; \
-})
-#else
-#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
-  __ret; \
-})
-#else
-#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  uint32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
-  __ret; \
-})
-#else
-#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  uint16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
-  __ret; \
-})
-#else
-#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
-  __ret; \
-})
-#else
-#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  float32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
-  __ret; \
-})
-#else
-#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  int32x2x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
-  __ret; \
-})
-#else
-#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  int16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld3_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld3_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld3q_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld3q_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld3q_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld3q_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld3q_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld3q_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld3q_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld3q_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld3q_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld3_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld3_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_u64(__p0) __extension__ ({ \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld3_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld3_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld3_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld3_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_s64(__p0) __extension__ ({ \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld3_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld3_dup_p8(__p0) __extension__ ({ \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld3_dup_p16(__p0) __extension__ ({ \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld3q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s8(__p0) __extension__ ({ \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f32(__p0) __extension__ ({ \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s32(__p0) __extension__ ({ \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld3q_dup_s16(__p0) __extension__ ({ \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld3_dup_u8(__p0) __extension__ ({ \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld3_dup_u32(__p0) __extension__ ({ \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_u64(__p0) __extension__ ({ \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld3_dup_u16(__p0) __extension__ ({ \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld3_dup_s8(__p0) __extension__ ({ \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld3_dup_f32(__p0) __extension__ ({ \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld3_dup_s32(__p0) __extension__ ({ \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_s64(__p0) __extension__ ({ \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld3_dup_s16(__p0) __extension__ ({ \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
-  __ret; \
-})
-#else
-#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
-  __ret; \
-})
-#else
-#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  poly16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  uint32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  float32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  int32x4x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
-  __ret; \
-})
-#else
-#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
-  __ret; \
-})
-#else
-#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  uint32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
-  __ret; \
-})
-#else
-#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  uint16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
-  __ret; \
-})
-#else
-#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \
-  __ret; \
-})
-#else
-#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  float32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \
-  __ret; \
-})
-#else
-#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  int32x2x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \
-  __ret; \
-})
-#else
-#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  int16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld4_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld4_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld4q_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld4q_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld4q_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld4q_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld4q_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld4q_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld4q_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld4q_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld4q_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld4_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld4_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_u64(__p0) __extension__ ({ \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld4_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld4_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld4_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld4_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_s64(__p0) __extension__ ({ \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld4_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
-  __ret; \
-})
-#else
-#define vld4_dup_p8(__p0) __extension__ ({ \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
-  __ret; \
-})
-#else
-#define vld4_dup_p16(__p0) __extension__ ({ \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p8(__p0) __extension__ ({ \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p16(__p0) __extension__ ({ \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u8(__p0) __extension__ ({ \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u32(__p0) __extension__ ({ \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
-  __ret; \
-})
-#else
-#define vld4q_dup_u16(__p0) __extension__ ({ \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s8(__p0) __extension__ ({ \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f32(__p0) __extension__ ({ \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s32(__p0) __extension__ ({ \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
-  __ret; \
-})
-#else
-#define vld4q_dup_s16(__p0) __extension__ ({ \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
-  __ret; \
-})
-#else
-#define vld4_dup_u8(__p0) __extension__ ({ \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
-  __ret; \
-})
-#else
-#define vld4_dup_u32(__p0) __extension__ ({ \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_u64(__p0) __extension__ ({ \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
-  __ret; \
-})
-#else
-#define vld4_dup_u16(__p0) __extension__ ({ \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
-  __ret; \
-})
-#else
-#define vld4_dup_s8(__p0) __extension__ ({ \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
-  __ret; \
-})
-#else
-#define vld4_dup_f32(__p0) __extension__ ({ \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
-  __ret; \
-})
-#else
-#define vld4_dup_s32(__p0) __extension__ ({ \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_s64(__p0) __extension__ ({ \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
-  __ret; \
-})
-#else
-#define vld4_dup_s16(__p0) __extension__ ({ \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
-  __ret; \
-})
-#else
-#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
-  __ret; \
-})
-#else
-#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  poly16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  uint32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  float32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  int32x4x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
-  __ret; \
-})
-#else
-#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
-  __ret; \
-})
-#else
-#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  uint32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
-  __ret; \
-})
-#else
-#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  uint16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
-  __ret; \
-})
-#else
-#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
-  __ret; \
-})
-#else
-#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  float32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
-  __ret; \
-})
-#else
-#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  int32x2x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
-  __ret; \
-})
-#else
-#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  int16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u32(__p0_42, __p1_42, __p2_42, __p3_42) __extension__ ({ \
-  uint32x4_t __s0_42 = __p0_42; \
-  uint32x4_t __s1_42 = __p1_42; \
-  uint32x2_t __s2_42 = __p2_42; \
-  uint32x4_t __ret_42; \
-  __ret_42 = __s0_42 + __s1_42 * splatq_lane_u32(__s2_42, __p3_42); \
-  __ret_42; \
-})
-#else
-#define vmlaq_lane_u32(__p0_43, __p1_43, __p2_43, __p3_43) __extension__ ({ \
-  uint32x4_t __s0_43 = __p0_43; \
-  uint32x4_t __s1_43 = __p1_43; \
-  uint32x2_t __s2_43 = __p2_43; \
-  uint32x4_t __rev0_43;  __rev0_43 = __builtin_shufflevector(__s0_43, __s0_43, 3, 2, 1, 0); \
-  uint32x4_t __rev1_43;  __rev1_43 = __builtin_shufflevector(__s1_43, __s1_43, 3, 2, 1, 0); \
-  uint32x2_t __rev2_43;  __rev2_43 = __builtin_shufflevector(__s2_43, __s2_43, 1, 0); \
-  uint32x4_t __ret_43; \
-  __ret_43 = __rev0_43 + __rev1_43 * __noswap_splatq_lane_u32(__rev2_43, __p3_43); \
-  __ret_43 = __builtin_shufflevector(__ret_43, __ret_43, 3, 2, 1, 0); \
-  __ret_43; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_u16(__p0_44, __p1_44, __p2_44, __p3_44) __extension__ ({ \
-  uint16x8_t __s0_44 = __p0_44; \
-  uint16x8_t __s1_44 = __p1_44; \
-  uint16x4_t __s2_44 = __p2_44; \
-  uint16x8_t __ret_44; \
-  __ret_44 = __s0_44 + __s1_44 * splatq_lane_u16(__s2_44, __p3_44); \
-  __ret_44; \
-})
-#else
-#define vmlaq_lane_u16(__p0_45, __p1_45, __p2_45, __p3_45) __extension__ ({ \
-  uint16x8_t __s0_45 = __p0_45; \
-  uint16x8_t __s1_45 = __p1_45; \
-  uint16x4_t __s2_45 = __p2_45; \
-  uint16x8_t __rev0_45;  __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_45;  __rev1_45 = __builtin_shufflevector(__s1_45, __s1_45, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_45;  __rev2_45 = __builtin_shufflevector(__s2_45, __s2_45, 3, 2, 1, 0); \
-  uint16x8_t __ret_45; \
-  __ret_45 = __rev0_45 + __rev1_45 * __noswap_splatq_lane_u16(__rev2_45, __p3_45); \
-  __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_45; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_f32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \
-  float32x4_t __s0_46 = __p0_46; \
-  float32x4_t __s1_46 = __p1_46; \
-  float32x2_t __s2_46 = __p2_46; \
-  float32x4_t __ret_46; \
-  __ret_46 = __s0_46 + __s1_46 * splatq_lane_f32(__s2_46, __p3_46); \
-  __ret_46; \
-})
-#else
-#define vmlaq_lane_f32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \
-  float32x4_t __s0_47 = __p0_47; \
-  float32x4_t __s1_47 = __p1_47; \
-  float32x2_t __s2_47 = __p2_47; \
-  float32x4_t __rev0_47;  __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \
-  float32x4_t __rev1_47;  __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \
-  float32x2_t __rev2_47;  __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \
-  float32x4_t __ret_47; \
-  __ret_47 = __rev0_47 + __rev1_47 * __noswap_splatq_lane_f32(__rev2_47, __p3_47); \
-  __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 3, 2, 1, 0); \
-  __ret_47; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s32(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \
-  int32x4_t __s0_48 = __p0_48; \
-  int32x4_t __s1_48 = __p1_48; \
-  int32x2_t __s2_48 = __p2_48; \
-  int32x4_t __ret_48; \
-  __ret_48 = __s0_48 + __s1_48 * splatq_lane_s32(__s2_48, __p3_48); \
-  __ret_48; \
-})
-#else
-#define vmlaq_lane_s32(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \
-  int32x4_t __s0_49 = __p0_49; \
-  int32x4_t __s1_49 = __p1_49; \
-  int32x2_t __s2_49 = __p2_49; \
-  int32x4_t __rev0_49;  __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 3, 2, 1, 0); \
-  int32x4_t __rev1_49;  __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 3, 2, 1, 0); \
-  int32x2_t __rev2_49;  __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 1, 0); \
-  int32x4_t __ret_49; \
-  __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_s32(__rev2_49, __p3_49); \
-  __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 3, 2, 1, 0); \
-  __ret_49; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_lane_s16(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \
-  int16x8_t __s0_50 = __p0_50; \
-  int16x8_t __s1_50 = __p1_50; \
-  int16x4_t __s2_50 = __p2_50; \
-  int16x8_t __ret_50; \
-  __ret_50 = __s0_50 + __s1_50 * splatq_lane_s16(__s2_50, __p3_50); \
-  __ret_50; \
-})
-#else
-#define vmlaq_lane_s16(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \
-  int16x8_t __s0_51 = __p0_51; \
-  int16x8_t __s1_51 = __p1_51; \
-  int16x4_t __s2_51 = __p2_51; \
-  int16x8_t __rev0_51;  __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_51;  __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_51;  __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 3, 2, 1, 0); \
-  int16x8_t __ret_51; \
-  __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_s16(__rev2_51, __p3_51); \
-  __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_51; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \
-  uint32x2_t __s0_52 = __p0_52; \
-  uint32x2_t __s1_52 = __p1_52; \
-  uint32x2_t __s2_52 = __p2_52; \
-  uint32x2_t __ret_52; \
-  __ret_52 = __s0_52 + __s1_52 * splat_lane_u32(__s2_52, __p3_52); \
-  __ret_52; \
-})
-#else
-#define vmla_lane_u32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \
-  uint32x2_t __s0_53 = __p0_53; \
-  uint32x2_t __s1_53 = __p1_53; \
-  uint32x2_t __s2_53 = __p2_53; \
-  uint32x2_t __rev0_53;  __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 1, 0); \
-  uint32x2_t __rev1_53;  __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 1, 0); \
-  uint32x2_t __rev2_53;  __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \
-  uint32x2_t __ret_53; \
-  __ret_53 = __rev0_53 + __rev1_53 * __noswap_splat_lane_u32(__rev2_53, __p3_53); \
-  __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 1, 0); \
-  __ret_53; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_u16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \
-  uint16x4_t __s0_54 = __p0_54; \
-  uint16x4_t __s1_54 = __p1_54; \
-  uint16x4_t __s2_54 = __p2_54; \
-  uint16x4_t __ret_54; \
-  __ret_54 = __s0_54 + __s1_54 * splat_lane_u16(__s2_54, __p3_54); \
-  __ret_54; \
-})
-#else
-#define vmla_lane_u16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \
-  uint16x4_t __s0_55 = __p0_55; \
-  uint16x4_t __s1_55 = __p1_55; \
-  uint16x4_t __s2_55 = __p2_55; \
-  uint16x4_t __rev0_55;  __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 3, 2, 1, 0); \
-  uint16x4_t __rev1_55;  __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 3, 2, 1, 0); \
-  uint16x4_t __rev2_55;  __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \
-  uint16x4_t __ret_55; \
-  __ret_55 = __rev0_55 + __rev1_55 * __noswap_splat_lane_u16(__rev2_55, __p3_55); \
-  __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 3, 2, 1, 0); \
-  __ret_55; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_f32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \
-  float32x2_t __s0_56 = __p0_56; \
-  float32x2_t __s1_56 = __p1_56; \
-  float32x2_t __s2_56 = __p2_56; \
-  float32x2_t __ret_56; \
-  __ret_56 = __s0_56 + __s1_56 * splat_lane_f32(__s2_56, __p3_56); \
-  __ret_56; \
-})
-#else
-#define vmla_lane_f32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \
-  float32x2_t __s0_57 = __p0_57; \
-  float32x2_t __s1_57 = __p1_57; \
-  float32x2_t __s2_57 = __p2_57; \
-  float32x2_t __rev0_57;  __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \
-  float32x2_t __rev1_57;  __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \
-  float32x2_t __rev2_57;  __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \
-  float32x2_t __ret_57; \
-  __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_f32(__rev2_57, __p3_57); \
-  __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 1, 0); \
-  __ret_57; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \
-  int32x2_t __s0_58 = __p0_58; \
-  int32x2_t __s1_58 = __p1_58; \
-  int32x2_t __s2_58 = __p2_58; \
-  int32x2_t __ret_58; \
-  __ret_58 = __s0_58 + __s1_58 * splat_lane_s32(__s2_58, __p3_58); \
-  __ret_58; \
-})
-#else
-#define vmla_lane_s32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \
-  int32x2_t __s0_59 = __p0_59; \
-  int32x2_t __s1_59 = __p1_59; \
-  int32x2_t __s2_59 = __p2_59; \
-  int32x2_t __rev0_59;  __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 1, 0); \
-  int32x2_t __rev1_59;  __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 1, 0); \
-  int32x2_t __rev2_59;  __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 1, 0); \
-  int32x2_t __ret_59; \
-  __ret_59 = __rev0_59 + __rev1_59 * __noswap_splat_lane_s32(__rev2_59, __p3_59); \
-  __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 1, 0); \
-  __ret_59; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_lane_s16(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \
-  int16x4_t __s0_60 = __p0_60; \
-  int16x4_t __s1_60 = __p1_60; \
-  int16x4_t __s2_60 = __p2_60; \
-  int16x4_t __ret_60; \
-  __ret_60 = __s0_60 + __s1_60 * splat_lane_s16(__s2_60, __p3_60); \
-  __ret_60; \
-})
-#else
-#define vmla_lane_s16(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \
-  int16x4_t __s0_61 = __p0_61; \
-  int16x4_t __s1_61 = __p1_61; \
-  int16x4_t __s2_61 = __p2_61; \
-  int16x4_t __rev0_61;  __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \
-  int16x4_t __rev1_61;  __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 3, 2, 1, 0); \
-  int16x4_t __rev2_61;  __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 3, 2, 1, 0); \
-  int16x4_t __ret_61; \
-  __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_s16(__rev2_61, __p3_61); \
-  __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \
-  __ret_61; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \
-  uint32x4_t __s0_62 = __p0_62; \
-  uint32x4_t __s1_62 = __p1_62; \
-  uint32x2_t __s2_62 = __p2_62; \
-  uint32x4_t __ret_62; \
-  __ret_62 = __s0_62 - __s1_62 * splatq_lane_u32(__s2_62, __p3_62); \
-  __ret_62; \
-})
-#else
-#define vmlsq_lane_u32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \
-  uint32x4_t __s0_63 = __p0_63; \
-  uint32x4_t __s1_63 = __p1_63; \
-  uint32x2_t __s2_63 = __p2_63; \
-  uint32x4_t __rev0_63;  __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 3, 2, 1, 0); \
-  uint32x4_t __rev1_63;  __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 3, 2, 1, 0); \
-  uint32x2_t __rev2_63;  __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \
-  uint32x4_t __ret_63; \
-  __ret_63 = __rev0_63 - __rev1_63 * __noswap_splatq_lane_u32(__rev2_63, __p3_63); \
-  __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 3, 2, 1, 0); \
-  __ret_63; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_u16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \
-  uint16x8_t __s0_64 = __p0_64; \
-  uint16x8_t __s1_64 = __p1_64; \
-  uint16x4_t __s2_64 = __p2_64; \
-  uint16x8_t __ret_64; \
-  __ret_64 = __s0_64 - __s1_64 * splatq_lane_u16(__s2_64, __p3_64); \
-  __ret_64; \
-})
-#else
-#define vmlsq_lane_u16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \
-  uint16x8_t __s0_65 = __p0_65; \
-  uint16x8_t __s1_65 = __p1_65; \
-  uint16x4_t __s2_65 = __p2_65; \
-  uint16x8_t __rev0_65;  __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_65;  __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_65;  __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \
-  uint16x8_t __ret_65; \
-  __ret_65 = __rev0_65 - __rev1_65 * __noswap_splatq_lane_u16(__rev2_65, __p3_65); \
-  __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_65; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_f32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \
-  float32x4_t __s0_66 = __p0_66; \
-  float32x4_t __s1_66 = __p1_66; \
-  float32x2_t __s2_66 = __p2_66; \
-  float32x4_t __ret_66; \
-  __ret_66 = __s0_66 - __s1_66 * splatq_lane_f32(__s2_66, __p3_66); \
-  __ret_66; \
-})
-#else
-#define vmlsq_lane_f32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \
-  float32x4_t __s0_67 = __p0_67; \
-  float32x4_t __s1_67 = __p1_67; \
-  float32x2_t __s2_67 = __p2_67; \
-  float32x4_t __rev0_67;  __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \
-  float32x4_t __rev1_67;  __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \
-  float32x2_t __rev2_67;  __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 1, 0); \
-  float32x4_t __ret_67; \
-  __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_f32(__rev2_67, __p3_67); \
-  __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \
-  __ret_67; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \
-  int32x4_t __s0_68 = __p0_68; \
-  int32x4_t __s1_68 = __p1_68; \
-  int32x2_t __s2_68 = __p2_68; \
-  int32x4_t __ret_68; \
-  __ret_68 = __s0_68 - __s1_68 * splatq_lane_s32(__s2_68, __p3_68); \
-  __ret_68; \
-})
-#else
-#define vmlsq_lane_s32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \
-  int32x4_t __s0_69 = __p0_69; \
-  int32x4_t __s1_69 = __p1_69; \
-  int32x2_t __s2_69 = __p2_69; \
-  int32x4_t __rev0_69;  __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 3, 2, 1, 0); \
-  int32x4_t __rev1_69;  __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 3, 2, 1, 0); \
-  int32x2_t __rev2_69;  __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 1, 0); \
-  int32x4_t __ret_69; \
-  __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_s32(__rev2_69, __p3_69); \
-  __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 3, 2, 1, 0); \
-  __ret_69; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_lane_s16(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \
-  int16x8_t __s0_70 = __p0_70; \
-  int16x8_t __s1_70 = __p1_70; \
-  int16x4_t __s2_70 = __p2_70; \
-  int16x8_t __ret_70; \
-  __ret_70 = __s0_70 - __s1_70 * splatq_lane_s16(__s2_70, __p3_70); \
-  __ret_70; \
-})
-#else
-#define vmlsq_lane_s16(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \
-  int16x8_t __s0_71 = __p0_71; \
-  int16x8_t __s1_71 = __p1_71; \
-  int16x4_t __s2_71 = __p2_71; \
-  int16x8_t __rev0_71;  __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_71;  __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_71;  __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 3, 2, 1, 0); \
-  int16x8_t __ret_71; \
-  __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_s16(__rev2_71, __p3_71); \
-  __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_71; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \
-  uint32x2_t __s0_72 = __p0_72; \
-  uint32x2_t __s1_72 = __p1_72; \
-  uint32x2_t __s2_72 = __p2_72; \
-  uint32x2_t __ret_72; \
-  __ret_72 = __s0_72 - __s1_72 * splat_lane_u32(__s2_72, __p3_72); \
-  __ret_72; \
-})
-#else
-#define vmls_lane_u32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \
-  uint32x2_t __s0_73 = __p0_73; \
-  uint32x2_t __s1_73 = __p1_73; \
-  uint32x2_t __s2_73 = __p2_73; \
-  uint32x2_t __rev0_73;  __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 1, 0); \
-  uint32x2_t __rev1_73;  __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 1, 0); \
-  uint32x2_t __rev2_73;  __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \
-  uint32x2_t __ret_73; \
-  __ret_73 = __rev0_73 - __rev1_73 * __noswap_splat_lane_u32(__rev2_73, __p3_73); \
-  __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 1, 0); \
-  __ret_73; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_u16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \
-  uint16x4_t __s0_74 = __p0_74; \
-  uint16x4_t __s1_74 = __p1_74; \
-  uint16x4_t __s2_74 = __p2_74; \
-  uint16x4_t __ret_74; \
-  __ret_74 = __s0_74 - __s1_74 * splat_lane_u16(__s2_74, __p3_74); \
-  __ret_74; \
-})
-#else
-#define vmls_lane_u16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \
-  uint16x4_t __s0_75 = __p0_75; \
-  uint16x4_t __s1_75 = __p1_75; \
-  uint16x4_t __s2_75 = __p2_75; \
-  uint16x4_t __rev0_75;  __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 3, 2, 1, 0); \
-  uint16x4_t __rev1_75;  __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 3, 2, 1, 0); \
-  uint16x4_t __rev2_75;  __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \
-  uint16x4_t __ret_75; \
-  __ret_75 = __rev0_75 - __rev1_75 * __noswap_splat_lane_u16(__rev2_75, __p3_75); \
-  __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 3, 2, 1, 0); \
-  __ret_75; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_f32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \
-  float32x2_t __s0_76 = __p0_76; \
-  float32x2_t __s1_76 = __p1_76; \
-  float32x2_t __s2_76 = __p2_76; \
-  float32x2_t __ret_76; \
-  __ret_76 = __s0_76 - __s1_76 * splat_lane_f32(__s2_76, __p3_76); \
-  __ret_76; \
-})
-#else
-#define vmls_lane_f32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \
-  float32x2_t __s0_77 = __p0_77; \
-  float32x2_t __s1_77 = __p1_77; \
-  float32x2_t __s2_77 = __p2_77; \
-  float32x2_t __rev0_77;  __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \
-  float32x2_t __rev1_77;  __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \
-  float32x2_t __rev2_77;  __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \
-  float32x2_t __ret_77; \
-  __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_f32(__rev2_77, __p3_77); \
-  __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 1, 0); \
-  __ret_77; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s32(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \
-  int32x2_t __s0_78 = __p0_78; \
-  int32x2_t __s1_78 = __p1_78; \
-  int32x2_t __s2_78 = __p2_78; \
-  int32x2_t __ret_78; \
-  __ret_78 = __s0_78 - __s1_78 * splat_lane_s32(__s2_78, __p3_78); \
-  __ret_78; \
-})
-#else
-#define vmls_lane_s32(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \
-  int32x2_t __s0_79 = __p0_79; \
-  int32x2_t __s1_79 = __p1_79; \
-  int32x2_t __s2_79 = __p2_79; \
-  int32x2_t __rev0_79;  __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 1, 0); \
-  int32x2_t __rev1_79;  __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 1, 0); \
-  int32x2_t __rev2_79;  __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 1, 0); \
-  int32x2_t __ret_79; \
-  __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_s32(__rev2_79, __p3_79); \
-  __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 1, 0); \
-  __ret_79; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_lane_s16(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \
-  int16x4_t __s0_80 = __p0_80; \
-  int16x4_t __s1_80 = __p1_80; \
-  int16x4_t __s2_80 = __p2_80; \
-  int16x4_t __ret_80; \
-  __ret_80 = __s0_80 - __s1_80 * splat_lane_s16(__s2_80, __p3_80); \
-  __ret_80; \
-})
-#else
-#define vmls_lane_s16(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \
-  int16x4_t __s0_81 = __p0_81; \
-  int16x4_t __s1_81 = __p1_81; \
-  int16x4_t __s2_81 = __p2_81; \
-  int16x4_t __rev0_81;  __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 3, 2, 1, 0); \
-  int16x4_t __rev1_81;  __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 3, 2, 1, 0); \
-  int16x4_t __rev2_81;  __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 3, 2, 1, 0); \
-  int16x4_t __ret_81; \
-  __ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_s16(__rev2_81, __p3_81); \
-  __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 3, 2, 1, 0); \
-  __ret_81; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  return __ret;
-}
-#else
-__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vmov_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x8_t vmov_n_p8(poly8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vmov_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x4_t vmov_n_p16(poly16_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly8x16_t vmovq_n_p8(poly8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai poly16x8_t vmovq_n_p16(poly16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x16_t vmovq_n_u8(uint8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovq_n_u32(uint32_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint64x2_t vmovq_n_u64(uint64_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovq_n_u16(uint16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmovq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x16_t vmovq_n_s8(int8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmovq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x4_t vmovq_n_f32(float32_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x4_t vmovq_n_s32(int32_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int64x2_t vmovq_n_s64(int64_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x8_t vmovq_n_s16(int16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmov_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint8x8_t vmov_n_u8(uint8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmov_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmov_n_u32(uint32_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vmov_n_u64(uint64_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmov_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmov_n_u16(uint16_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmov_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int8x8_t vmov_n_s8(int8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmov_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float32x2_t vmov_n_f32(float32_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmov_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai int32x2_t vmov_n_s32(int32_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vmov_n_s64(int64_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmov_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai int16x4_t vmov_n_s16(int16_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovl_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmovl_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovl_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_s8(int8x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vmovl_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_s32(int32x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vmovl_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_s16(int16x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmovl_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vmovn_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vmovn_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vmovn_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vmovn_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vmovn_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vmovn_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u32(__p0_82, __p1_82, __p2_82) __extension__ ({ \
-  uint32x4_t __s0_82 = __p0_82; \
-  uint32x2_t __s1_82 = __p1_82; \
-  uint32x4_t __ret_82; \
-  __ret_82 = __s0_82 * splatq_lane_u32(__s1_82, __p2_82); \
-  __ret_82; \
-})
-#else
-#define vmulq_lane_u32(__p0_83, __p1_83, __p2_83) __extension__ ({ \
-  uint32x4_t __s0_83 = __p0_83; \
-  uint32x2_t __s1_83 = __p1_83; \
-  uint32x4_t __rev0_83;  __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 3, 2, 1, 0); \
-  uint32x2_t __rev1_83;  __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \
-  uint32x4_t __ret_83; \
-  __ret_83 = __rev0_83 * __noswap_splatq_lane_u32(__rev1_83, __p2_83); \
-  __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 3, 2, 1, 0); \
-  __ret_83; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_u16(__p0_84, __p1_84, __p2_84) __extension__ ({ \
-  uint16x8_t __s0_84 = __p0_84; \
-  uint16x4_t __s1_84 = __p1_84; \
-  uint16x8_t __ret_84; \
-  __ret_84 = __s0_84 * splatq_lane_u16(__s1_84, __p2_84); \
-  __ret_84; \
-})
-#else
-#define vmulq_lane_u16(__p0_85, __p1_85, __p2_85) __extension__ ({ \
-  uint16x8_t __s0_85 = __p0_85; \
-  uint16x4_t __s1_85 = __p1_85; \
-  uint16x8_t __rev0_85;  __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1_85;  __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \
-  uint16x8_t __ret_85; \
-  __ret_85 = __rev0_85 * __noswap_splatq_lane_u16(__rev1_85, __p2_85); \
-  __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_85; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f32(__p0_86, __p1_86, __p2_86) __extension__ ({ \
-  float32x4_t __s0_86 = __p0_86; \
-  float32x2_t __s1_86 = __p1_86; \
-  float32x4_t __ret_86; \
-  __ret_86 = __s0_86 * splatq_lane_f32(__s1_86, __p2_86); \
-  __ret_86; \
-})
-#else
-#define vmulq_lane_f32(__p0_87, __p1_87, __p2_87) __extension__ ({ \
-  float32x4_t __s0_87 = __p0_87; \
-  float32x2_t __s1_87 = __p1_87; \
-  float32x4_t __rev0_87;  __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \
-  float32x2_t __rev1_87;  __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \
-  float32x4_t __ret_87; \
-  __ret_87 = __rev0_87 * __noswap_splatq_lane_f32(__rev1_87, __p2_87); \
-  __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \
-  __ret_87; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s32(__p0_88, __p1_88, __p2_88) __extension__ ({ \
-  int32x4_t __s0_88 = __p0_88; \
-  int32x2_t __s1_88 = __p1_88; \
-  int32x4_t __ret_88; \
-  __ret_88 = __s0_88 * splatq_lane_s32(__s1_88, __p2_88); \
-  __ret_88; \
-})
-#else
-#define vmulq_lane_s32(__p0_89, __p1_89, __p2_89) __extension__ ({ \
-  int32x4_t __s0_89 = __p0_89; \
-  int32x2_t __s1_89 = __p1_89; \
-  int32x4_t __rev0_89;  __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 3, 2, 1, 0); \
-  int32x2_t __rev1_89;  __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 1, 0); \
-  int32x4_t __ret_89; \
-  __ret_89 = __rev0_89 * __noswap_splatq_lane_s32(__rev1_89, __p2_89); \
-  __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 3, 2, 1, 0); \
-  __ret_89; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_s16(__p0_90, __p1_90, __p2_90) __extension__ ({ \
-  int16x8_t __s0_90 = __p0_90; \
-  int16x4_t __s1_90 = __p1_90; \
-  int16x8_t __ret_90; \
-  __ret_90 = __s0_90 * splatq_lane_s16(__s1_90, __p2_90); \
-  __ret_90; \
-})
-#else
-#define vmulq_lane_s16(__p0_91, __p1_91, __p2_91) __extension__ ({ \
-  int16x8_t __s0_91 = __p0_91; \
-  int16x4_t __s1_91 = __p1_91; \
-  int16x8_t __rev0_91;  __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_91;  __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 3, 2, 1, 0); \
-  int16x8_t __ret_91; \
-  __ret_91 = __rev0_91 * __noswap_splatq_lane_s16(__rev1_91, __p2_91); \
-  __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_91; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u32(__p0_92, __p1_92, __p2_92) __extension__ ({ \
-  uint32x2_t __s0_92 = __p0_92; \
-  uint32x2_t __s1_92 = __p1_92; \
-  uint32x2_t __ret_92; \
-  __ret_92 = __s0_92 * splat_lane_u32(__s1_92, __p2_92); \
-  __ret_92; \
-})
-#else
-#define vmul_lane_u32(__p0_93, __p1_93, __p2_93) __extension__ ({ \
-  uint32x2_t __s0_93 = __p0_93; \
-  uint32x2_t __s1_93 = __p1_93; \
-  uint32x2_t __rev0_93;  __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 1, 0); \
-  uint32x2_t __rev1_93;  __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \
-  uint32x2_t __ret_93; \
-  __ret_93 = __rev0_93 * __noswap_splat_lane_u32(__rev1_93, __p2_93); \
-  __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 1, 0); \
-  __ret_93; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_u16(__p0_94, __p1_94, __p2_94) __extension__ ({ \
-  uint16x4_t __s0_94 = __p0_94; \
-  uint16x4_t __s1_94 = __p1_94; \
-  uint16x4_t __ret_94; \
-  __ret_94 = __s0_94 * splat_lane_u16(__s1_94, __p2_94); \
-  __ret_94; \
-})
-#else
-#define vmul_lane_u16(__p0_95, __p1_95, __p2_95) __extension__ ({ \
-  uint16x4_t __s0_95 = __p0_95; \
-  uint16x4_t __s1_95 = __p1_95; \
-  uint16x4_t __rev0_95;  __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \
-  uint16x4_t __rev1_95;  __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \
-  uint16x4_t __ret_95; \
-  __ret_95 = __rev0_95 * __noswap_splat_lane_u16(__rev1_95, __p2_95); \
-  __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \
-  __ret_95; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f32(__p0_96, __p1_96, __p2_96) __extension__ ({ \
-  float32x2_t __s0_96 = __p0_96; \
-  float32x2_t __s1_96 = __p1_96; \
-  float32x2_t __ret_96; \
-  __ret_96 = __s0_96 * splat_lane_f32(__s1_96, __p2_96); \
-  __ret_96; \
-})
-#else
-#define vmul_lane_f32(__p0_97, __p1_97, __p2_97) __extension__ ({ \
-  float32x2_t __s0_97 = __p0_97; \
-  float32x2_t __s1_97 = __p1_97; \
-  float32x2_t __rev0_97;  __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \
-  float32x2_t __rev1_97;  __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 1, 0); \
-  float32x2_t __ret_97; \
-  __ret_97 = __rev0_97 * __noswap_splat_lane_f32(__rev1_97, __p2_97); \
-  __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 1, 0); \
-  __ret_97; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s32(__p0_98, __p1_98, __p2_98) __extension__ ({ \
-  int32x2_t __s0_98 = __p0_98; \
-  int32x2_t __s1_98 = __p1_98; \
-  int32x2_t __ret_98; \
-  __ret_98 = __s0_98 * splat_lane_s32(__s1_98, __p2_98); \
-  __ret_98; \
-})
-#else
-#define vmul_lane_s32(__p0_99, __p1_99, __p2_99) __extension__ ({ \
-  int32x2_t __s0_99 = __p0_99; \
-  int32x2_t __s1_99 = __p1_99; \
-  int32x2_t __rev0_99;  __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 1, 0); \
-  int32x2_t __rev1_99;  __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \
-  int32x2_t __ret_99; \
-  __ret_99 = __rev0_99 * __noswap_splat_lane_s32(__rev1_99, __p2_99); \
-  __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 1, 0); \
-  __ret_99; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_s16(__p0_100, __p1_100, __p2_100) __extension__ ({ \
-  int16x4_t __s0_100 = __p0_100; \
-  int16x4_t __s1_100 = __p1_100; \
-  int16x4_t __ret_100; \
-  __ret_100 = __s0_100 * splat_lane_s16(__s1_100, __p2_100); \
-  __ret_100; \
-})
-#else
-#define vmul_lane_s16(__p0_101, __p1_101, __p2_101) __extension__ ({ \
-  int16x4_t __s0_101 = __p0_101; \
-  int16x4_t __s1_101 = __p1_101; \
-  int16x4_t __rev0_101;  __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 3, 2, 1, 0); \
-  int16x4_t __rev1_101;  __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 3, 2, 1, 0); \
-  int16x4_t __ret_101; \
-  __ret_101 = __rev0_101 * __noswap_splat_lane_s16(__rev1_101, __p2_101); \
-  __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 3, 2, 1, 0); \
-  __ret_101; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 * (uint32x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 * (uint32x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 * (float32x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 * (float32x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 * (int32x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 * (int32x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1};
-  return __ret;
-}
-#else
-__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u32(__p0_102, __p1_102, __p2_102) __extension__ ({ \
-  uint32x2_t __s0_102 = __p0_102; \
-  uint32x2_t __s1_102 = __p1_102; \
-  uint64x2_t __ret_102; \
-  __ret_102 = vmull_u32(__s0_102, splat_lane_u32(__s1_102, __p2_102)); \
-  __ret_102; \
-})
-#else
-#define vmull_lane_u32(__p0_103, __p1_103, __p2_103) __extension__ ({ \
-  uint32x2_t __s0_103 = __p0_103; \
-  uint32x2_t __s1_103 = __p1_103; \
-  uint32x2_t __rev0_103;  __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 1, 0); \
-  uint32x2_t __rev1_103;  __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \
-  uint64x2_t __ret_103; \
-  __ret_103 = __noswap_vmull_u32(__rev0_103, __noswap_splat_lane_u32(__rev1_103, __p2_103)); \
-  __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \
-  __ret_103; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_u16(__p0_104, __p1_104, __p2_104) __extension__ ({ \
-  uint16x4_t __s0_104 = __p0_104; \
-  uint16x4_t __s1_104 = __p1_104; \
-  uint32x4_t __ret_104; \
-  __ret_104 = vmull_u16(__s0_104, splat_lane_u16(__s1_104, __p2_104)); \
-  __ret_104; \
-})
-#else
-#define vmull_lane_u16(__p0_105, __p1_105, __p2_105) __extension__ ({ \
-  uint16x4_t __s0_105 = __p0_105; \
-  uint16x4_t __s1_105 = __p1_105; \
-  uint16x4_t __rev0_105;  __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \
-  uint16x4_t __rev1_105;  __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \
-  uint32x4_t __ret_105; \
-  __ret_105 = __noswap_vmull_u16(__rev0_105, __noswap_splat_lane_u16(__rev1_105, __p2_105)); \
-  __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \
-  __ret_105; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s32(__p0_106, __p1_106, __p2_106) __extension__ ({ \
-  int32x2_t __s0_106 = __p0_106; \
-  int32x2_t __s1_106 = __p1_106; \
-  int64x2_t __ret_106; \
-  __ret_106 = vmull_s32(__s0_106, splat_lane_s32(__s1_106, __p2_106)); \
-  __ret_106; \
-})
-#else
-#define vmull_lane_s32(__p0_107, __p1_107, __p2_107) __extension__ ({ \
-  int32x2_t __s0_107 = __p0_107; \
-  int32x2_t __s1_107 = __p1_107; \
-  int32x2_t __rev0_107;  __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \
-  int32x2_t __rev1_107;  __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \
-  int64x2_t __ret_107; \
-  __ret_107 = __noswap_vmull_s32(__rev0_107, __noswap_splat_lane_s32(__rev1_107, __p2_107)); \
-  __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \
-  __ret_107; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_lane_s16(__p0_108, __p1_108, __p2_108) __extension__ ({ \
-  int16x4_t __s0_108 = __p0_108; \
-  int16x4_t __s1_108 = __p1_108; \
-  int32x4_t __ret_108; \
-  __ret_108 = vmull_s16(__s0_108, splat_lane_s16(__s1_108, __p2_108)); \
-  __ret_108; \
-})
-#else
-#define vmull_lane_s16(__p0_109, __p1_109, __p2_109) __extension__ ({ \
-  int16x4_t __s0_109 = __p0_109; \
-  int16x4_t __s1_109 = __p1_109; \
-  int16x4_t __rev0_109;  __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \
-  int16x4_t __rev1_109;  __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \
-  int32x4_t __ret_109; \
-  __ret_109 = __noswap_vmull_s16(__rev0_109, __noswap_splat_lane_s16(__rev1_109, __p2_109)); \
-  __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 3, 2, 1, 0); \
-  __ret_109; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_u32(__p0, (uint32x2_t) {__p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = __noswap_vmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = __noswap_vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai poly8x8_t vmvn_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmvnq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int8x16_t vmvnq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmvnq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int32x4_t vmvnq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmvnq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int16x8_t vmvnq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint8x8_t vmvn_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint32x2_t vmvn_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai uint16x4_t vmvn_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vmvn_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int8x8_t vmvn_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vmvn_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int32x2_t vmvn_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vmvn_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = ~__p0;
-  return __ret;
-}
-#else
-__ai int16x4_t vmvn_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = ~__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vnegq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int8x16_t vnegq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vnegq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float32x4_t vnegq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vnegq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int32x4_t vnegq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vnegq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int16x8_t vnegq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vneg_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int8x8_t vneg_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vneg_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float32x2_t vneg_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vneg_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int32x2_t vneg_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vneg_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int16x4_t vneg_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
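/* No big-endian variant is needed for vorn_u64: uint64x1_t has a single
 * lane, so there is no lane order to reverse. The same holds for the other
 * 64x1 intrinsics below (vorn_s64, vorr_u64, vorr_s64, vqadd_u64, ...). */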
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 | ~__p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 | ~__rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 | __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 | __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
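/* vpadal/vpadalq ("pairwise add and accumulate long") add adjacent element
 * pairs of the second operand, widen each sum, and accumulate into the
 * first operand. The trailing integer passed to __builtin_neon_vpadal*_v
 * appears to be clang's internal NeonTypeFlags code for the result type:
 * the low bits pick the element type (0 = i8, 1 = i16, 2 = i32, 3 = i64,
 * 9 = f32), 0x10 marks unsigned, and 0x20 marks a 128-bit "quad" vector,
 * so 49 = 0x31 = unsigned 16-bit elements in a quad register. Treat that
 * decoding as an informed reading of this header, not a stable interface. */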
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#else
-__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#else
-__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) {
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpaddlq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpaddlq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpaddlq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#else
-__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpaddl_s8(int8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpaddl_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x1_t vpaddl_s32(int32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#else
-__ai int64x1_t vpaddl_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpaddl_s16(int16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpaddl_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
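A minimal usage sketch, not part of the header: it assumes a NEON-enabled ARM
toolchain, with vld1_u8 and vget_lane_u64 defined elsewhere in arm_neon.h. The
widening pairwise adds above reduce eight bytes to one sum with no overflow risk.

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t sum8(const uint8_t *p) {
  uint8x8_t v = vld1_u8(p);        /* load 8 bytes */
  uint16x4_t s1 = vpaddl_u8(v);    /* 4 widened pairwise sums */
  uint32x2_t s2 = vpaddl_u16(s1);  /* 2 widened pairwise sums */
  uint64x1_t s3 = vpaddl_u32(s2);  /* final widened sum */
  return (uint32_t)vget_lane_u64(s3, 0);
}

int main(void) {
  const uint8_t data[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  printf("%u\n", sum8(data));      /* prints 36 */
  return 0;
}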
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
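/* vpmax/vpmin reduce pairwise across both operands: the low half of the
 * result holds the maxima (or minima) of adjacent pairs of __p0, and the
 * high half those of adjacent pairs of __p1. */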
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqabsq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqabsq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqabsq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqabsq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqabsq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqabsq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqabs_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqabs_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqabs_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqabs_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqabs_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqabs_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
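/* The vq* intrinsics below saturate rather than wrap: vqabs clamps the most
 * negative value to the type maximum (vqabs_s8(-128) == 127), and vqadd
 * clamps out-of-range sums to the representable extremes. */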
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
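/* The __noswap_* helpers, emitted only in the big-endian branch, run the
 * operation without any lane reversal. Composite big-endian wrappers such
 * as vqdmlal_lane_s32 below reverse their operands once up front and then
 * call __noswap_vqdmlal_s32, so the lanes are not swapped a second time. */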
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#endif
-
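/* vqdmlal ("saturating doubling multiply-accumulate long") widens as it
 * multiplies: each product 2 * __p1[i] * __p2[i] is computed at double
 * width with saturation, then accumulated into __p0[i] with saturation. */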
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s32(__p0_110, __p1_110, __p2_110, __p3_110) __extension__ ({ \
-  int64x2_t __s0_110 = __p0_110; \
-  int32x2_t __s1_110 = __p1_110; \
-  int32x2_t __s2_110 = __p2_110; \
-  int64x2_t __ret_110; \
-  __ret_110 = vqdmlal_s32(__s0_110, __s1_110, splat_lane_s32(__s2_110, __p3_110)); \
-  __ret_110; \
-})
-#else
-#define vqdmlal_lane_s32(__p0_111, __p1_111, __p2_111, __p3_111) __extension__ ({ \
-  int64x2_t __s0_111 = __p0_111; \
-  int32x2_t __s1_111 = __p1_111; \
-  int32x2_t __s2_111 = __p2_111; \
-  int64x2_t __rev0_111;  __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \
-  int32x2_t __rev1_111;  __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \
-  int32x2_t __rev2_111;  __rev2_111 = __builtin_shufflevector(__s2_111, __s2_111, 1, 0); \
-  int64x2_t __ret_111; \
-  __ret_111 = __noswap_vqdmlal_s32(__rev0_111, __rev1_111, __noswap_splat_lane_s32(__rev2_111, __p3_111)); \
-  __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \
-  __ret_111; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_lane_s16(__p0_112, __p1_112, __p2_112, __p3_112) __extension__ ({ \
-  int32x4_t __s0_112 = __p0_112; \
-  int16x4_t __s1_112 = __p1_112; \
-  int16x4_t __s2_112 = __p2_112; \
-  int32x4_t __ret_112; \
-  __ret_112 = vqdmlal_s16(__s0_112, __s1_112, splat_lane_s16(__s2_112, __p3_112)); \
-  __ret_112; \
-})
-#else
-#define vqdmlal_lane_s16(__p0_113, __p1_113, __p2_113, __p3_113) __extension__ ({ \
-  int32x4_t __s0_113 = __p0_113; \
-  int16x4_t __s1_113 = __p1_113; \
-  int16x4_t __s2_113 = __p2_113; \
-  int32x4_t __rev0_113;  __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \
-  int16x4_t __rev1_113;  __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \
-  int16x4_t __rev2_113;  __rev2_113 = __builtin_shufflevector(__s2_113, __s2_113, 3, 2, 1, 0); \
-  int32x4_t __ret_113; \
-  __ret_113 = __noswap_vqdmlal_s16(__rev0_113, __rev1_113, __noswap_splat_lane_s16(__rev2_113, __p3_113)); \
-  __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \
-  __ret_113; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \
-  int64x2_t __s0_114 = __p0_114; \
-  int32x2_t __s1_114 = __p1_114; \
-  int32x2_t __s2_114 = __p2_114; \
-  int64x2_t __ret_114; \
-  __ret_114 = vqdmlsl_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \
-  __ret_114; \
-})
-#else
-#define vqdmlsl_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \
-  int64x2_t __s0_115 = __p0_115; \
-  int32x2_t __s1_115 = __p1_115; \
-  int32x2_t __s2_115 = __p2_115; \
-  int64x2_t __rev0_115;  __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \
-  int32x2_t __rev1_115;  __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \
-  int32x2_t __rev2_115;  __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \
-  int64x2_t __ret_115; \
-  __ret_115 = __noswap_vqdmlsl_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \
-  __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \
-  __ret_115; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \
-  int32x4_t __s0_116 = __p0_116; \
-  int16x4_t __s1_116 = __p1_116; \
-  int16x4_t __s2_116 = __p2_116; \
-  int32x4_t __ret_116; \
-  __ret_116 = vqdmlsl_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \
-  __ret_116; \
-})
-#else
-#define vqdmlsl_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \
-  int32x4_t __s0_117 = __p0_117; \
-  int16x4_t __s1_117 = __p1_117; \
-  int16x4_t __s2_117 = __p2_117; \
-  int32x4_t __rev0_117;  __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \
-  int16x4_t __rev1_117;  __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \
-  int16x4_t __rev2_117;  __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \
-  int32x4_t __ret_117; \
-  __ret_117 = __noswap_vqdmlsl_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \
-  __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \
-  __ret_117; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __ret;
-  __ret = vqdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __ret;
-  __ret = vqdmulh_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __ret;
-  __ret = vqdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s32(__p0_118, __p1_118, __p2_118) __extension__ ({ \
-  int32x2_t __s0_118 = __p0_118; \
-  int32x2_t __s1_118 = __p1_118; \
-  int64x2_t __ret_118; \
-  __ret_118 = vqdmull_s32(__s0_118, splat_lane_s32(__s1_118, __p2_118)); \
-  __ret_118; \
-})
-#else
-#define vqdmull_lane_s32(__p0_119, __p1_119, __p2_119) __extension__ ({ \
-  int32x2_t __s0_119 = __p0_119; \
-  int32x2_t __s1_119 = __p1_119; \
-  int32x2_t __rev0_119;  __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \
-  int32x2_t __rev1_119;  __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \
-  int64x2_t __ret_119; \
-  __ret_119 = __noswap_vqdmull_s32(__rev0_119, __noswap_splat_lane_s32(__rev1_119, __p2_119)); \
-  __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \
-  __ret_119; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_lane_s16(__p0_120, __p1_120, __p2_120) __extension__ ({ \
-  int16x4_t __s0_120 = __p0_120; \
-  int16x4_t __s1_120 = __p1_120; \
-  int32x4_t __ret_120; \
-  __ret_120 = vqdmull_s16(__s0_120, splat_lane_s16(__s1_120, __p2_120)); \
-  __ret_120; \
-})
-#else
-#define vqdmull_lane_s16(__p0_121, __p1_121, __p2_121) __extension__ ({ \
-  int16x4_t __s0_121 = __p0_121; \
-  int16x4_t __s1_121 = __p1_121; \
-  int16x4_t __rev0_121;  __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \
-  int16x4_t __rev1_121;  __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \
-  int32x4_t __ret_121; \
-  __ret_121 = __noswap_vqdmull_s16(__rev0_121, __noswap_splat_lane_s16(__rev1_121, __p2_121)); \
-  __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \
-  __ret_121; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqmovn_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqmovn_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqmovn_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqmovun_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqmovun_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqmovun_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqnegq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqnegq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqnegq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqneg_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqneg_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqneg_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqneg_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqneg_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqneg_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __ret;
-  __ret = vqrdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __ret;
-  __ret = vqrdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __ret;
-  __ret = vqrdmulh_s32(__p0, (int32x2_t) {__p1, __p1});
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __ret;
-  __ret = vqrdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vqshl_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vqshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vqshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vqshl_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vqshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vqsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vqsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vqsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vqsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrecpeq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrecpe_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrecpe_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrev16_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrev16q_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  return __ret;
-}
-#else
-__ai int8x16_t vrev16q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrev16_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrev16_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai int8x8_t vrev16_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrev32_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai poly16x4_t vrev32_p16(poly16x4_t __p0) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrev32q_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  return __ret;
-}
-#else
-__ai int8x16_t vrev32q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrev32q_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6);
-  return __ret;
-}
-#else
-__ai int16x8_t vrev32q_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrev32_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrev32_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrev32_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai int8x8_t vrev32_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrev32_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai int16x4_t vrev32_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrev64_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai poly16x4_t vrev64_p16(poly16x4_t __p0) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrev64q_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  return __ret;
-}
-#else
-__ai int8x16_t vrev64q_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrev64q_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai float32x4_t vrev64q_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrev64q_s32(int32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2);
-  return __ret;
-}
-#else
-__ai int32x4_t vrev64q_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrev64q_s16(int16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai int16x8_t vrev64q_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrev64_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrev64_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrev64_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrev64_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrev64_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrev64_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
-  return __ret;
-}
-#else
-__ai float32x2_t vrev64_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrev64_s32(int32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1, 0);
-  return __ret;
-}
-#else
-__ai int32x2_t vrev64_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrev64_s16(int16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai int16x4_t vrev64_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vrshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vrshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrshr_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vrshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vrshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vrshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrshr_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vrshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrsqrte_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vshlq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vshlq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vshlq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vshlq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vshlq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vshlq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vshlq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vshlq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vshl_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vshl_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshl_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vshl_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vshl_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vshl_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshl_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vshl_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \
-  __ret; \
-})
-#else
-#define vshrq_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \
-  __ret; \
-})
-#else
-#define vshrq_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vshrq_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vshrq_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \
-  __ret; \
-})
-#else
-#define vshrq_n_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \
-  __ret; \
-})
-#else
-#define vshrq_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vshrq_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vshrq_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vshr_n_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vshr_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshr_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vshr_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vshr_n_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vshr_n_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vshr_n_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vshr_n_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#else
-#define vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#else
-#define vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#else
-#define vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#else
-#define vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \
-  __ret; \
-})
-#else
-#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret; \
-  __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \
-  __ret; \
-})
-#else
-#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  poly16x4_t __ret; \
-  __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \
-  __ret; \
-})
-#else
-#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret; \
-  __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \
-  __ret; \
-})
-#else
-#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret; \
-  __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \
-  __ret; \
-})
-#else
-#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret; \
-  __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \
-  __ret; \
-})
-#else
-#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint32x4_t __ret; \
-  __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \
-  __ret; \
-})
-#else
-#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \
-  __ret; \
-})
-#else
-#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \
-  __ret; \
-})
-#else
-#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret; \
-  __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \
-  __ret; \
-})
-#else
-#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \
-  __ret; \
-})
-#else
-#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret; \
-  __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \
-  __ret; \
-})
-#else
-#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  uint32x2_t __ret; \
-  __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64x1_t __s1 = __p1; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \
-  __ret; \
-})
-#else
-#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \
-  __ret; \
-})
-#else
-#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret; \
-  __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64x1_t __s1 = __p1; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \
-})
-#else
-#define vst1_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \
-})
-#else
-#define vst1_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \
-})
-#else
-#define vst1q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \
-})
-#else
-#define vst1q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \
-})
-#else
-#define vst1q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \
-})
-#else
-#define vst1q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \
-})
-#else
-#define vst1q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \
-})
-#else
-#define vst1q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \
-})
-#else
-#define vst1q_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \
-})
-#else
-#define vst1q_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \
-})
-#else
-#define vst1q_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \
-})
-#else
-#define vst1q_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \
-})
-#else
-#define vst1q_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \
-})
-#else
-#define vst1_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \
-})
-#else
-#define vst1_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \
-})
-#endif
-
-#define vst1_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \
-})
-#else
-#define vst1_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \
-})
-#else
-#define vst1_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \
-})
-#else
-#define vst1_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \
-})
-#else
-#define vst1_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \
-})
-#endif
-
-#define vst1_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \
-})
-#else
-#define vst1_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \
-})
-#else
-#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8_t __s1 = __p1; \
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \
-})
-#else
-#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4_t __s1 = __p1; \
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \
-})
-#else
-#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16_t __s1 = __p1; \
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \
-})
-#else
-#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8_t __s1 = __p1; \
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \
-})
-#else
-#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16_t __s1 = __p1; \
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \
-})
-#else
-#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4_t __s1 = __p1; \
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \
-})
-#else
-#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2_t __s1 = __p1; \
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \
-})
-#else
-#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8_t __s1 = __p1; \
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \
-})
-#else
-#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16_t __s1 = __p1; \
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \
-})
-#else
-#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \
-})
-#else
-#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \
-})
-#else
-#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2_t __s1 = __p1; \
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \
-})
-#else
-#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \
-})
-#else
-#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8_t __s1 = __p1; \
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \
-})
-#else
-#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2_t __s1 = __p1; \
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \
-})
-#endif
-
-#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \
-})
-#else
-#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4_t __s1 = __p1; \
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \
-})
-#else
-#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8_t __s1 = __p1; \
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \
-})
-#else
-#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \
-})
-#else
-#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \
-})
-#endif
-
-#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \
-})
-#else
-#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
-})
-#else
-#define vst1_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
-})
-#else
-#define vst1_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
-})
-#else
-#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
-})
-#else
-#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
-})
-#else
-#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
-})
-#else
-#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
-})
-#else
-#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
-})
-#else
-#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
-})
-#else
-#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \
-})
-#else
-#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \
-})
-#else
-#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \
-})
-#else
-#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \
-})
-#else
-#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
-})
-#else
-#define vst1_u8_x2(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
-})
-#else
-#define vst1_u32_x2(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
-})
-#endif
-
-#define vst1_u64_x2(__p0, __p1) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
-})
-#else
-#define vst1_u16_x2(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
-})
-#else
-#define vst1_s8_x2(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \
-})
-#else
-#define vst1_f32_x2(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \
-})
-#else
-#define vst1_s32_x2(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \
-})
-#endif
-
-#define vst1_s64_x2(__p0, __p1) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \
-})
-#else
-#define vst1_s16_x2(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
-})
-#else
-#define vst1_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
-})
-#else
-#define vst1_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
-})
-#else
-#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
-})
-#else
-#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
-})
-#else
-#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
-})
-#else
-#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
-})
-#else
-#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
-})
-#else
-#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
-})
-#else
-#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \
-})
-#else
-#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \
-})
-#else
-#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \
-})
-#else
-#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \
-})
-#else
-#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
-})
-#else
-#define vst1_u8_x3(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
-})
-#else
-#define vst1_u32_x3(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
-})
-#endif
-
-#define vst1_u64_x3(__p0, __p1) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
-})
-#else
-#define vst1_u16_x3(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
-})
-#else
-#define vst1_s8_x3(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \
-})
-#else
-#define vst1_f32_x3(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \
-})
-#else
-#define vst1_s32_x3(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \
-})
-#endif
-
-#define vst1_s64_x3(__p0, __p1) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \
-})
-#else
-#define vst1_s16_x3(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
-})
-#else
-#define vst1_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
-})
-#else
-#define vst1_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
-})
-#else
-#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
-})
-#else
-#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
-})
-#else
-#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
-})
-#else
-#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
-})
-#else
-#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
-})
-#else
-#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
-})
-#else
-#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \
-})
-#else
-#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \
-})
-#else
-#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \
-})
-#else
-#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \
-})
-#else
-#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
-})
-#else
-#define vst1_u8_x4(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
-})
-#else
-#define vst1_u32_x4(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
-})
-#endif
-
-#define vst1_u64_x4(__p0, __p1) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
-})
-#else
-#define vst1_u16_x4(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
-})
-#else
-#define vst1_s8_x4(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \
-})
-#else
-#define vst1_f32_x4(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \
-})
-#else
-#define vst1_s32_x4(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \
-})
-#endif
-
-#define vst1_s64_x4(__p0, __p1) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \
-})
-#else
-#define vst1_s16_x4(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \
-})
-#else
-#define vst2_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \
-})
-#else
-#define vst2_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \
-})
-#else
-#define vst2q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \
-})
-#else
-#define vst2q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \
-})
-#else
-#define vst2q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \
-})
-#else
-#define vst2q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \
-})
-#else
-#define vst2q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \
-})
-#else
-#define vst2q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \
-})
-#else
-#define vst2q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \
-})
-#else
-#define vst2q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \
-})
-#else
-#define vst2q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \
-})
-#else
-#define vst2_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \
-})
-#else
-#define vst2_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \
-})
-#endif
-
-#define vst2_u64(__p0, __p1) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \
-})
-#else
-#define vst2_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_s8(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \
-})
-#else
-#define vst2_s8(__p0, __p1) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_f32(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \
-})
-#else
-#define vst2_f32(__p0, __p1) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_s32(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \
-})
-#else
-#define vst2_s32(__p0, __p1) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \
-})
-#endif
-
-#define vst2_s64(__p0, __p1) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2_s16(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \
-})
-#else
-#define vst2_s16(__p0, __p1) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \
-})
-#else
-#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x2_t __s1 = __p1; \
-  poly8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \
-})
-#else
-#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x2_t __s1 = __p1; \
-  poly16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \
-})
-#else
-#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x2_t __s1 = __p1; \
-  poly16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \
-})
-#else
-#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x2_t __s1 = __p1; \
-  uint32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \
-})
-#else
-#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x2_t __s1 = __p1; \
-  uint16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \
-})
-#else
-#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x2_t __s1 = __p1; \
-  float32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \
-})
-#else
-#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x2_t __s1 = __p1; \
-  int32x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \
-})
-#else
-#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x2_t __s1 = __p1; \
-  int16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \
-})
-#else
-#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x2_t __s1 = __p1; \
-  uint8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \
-})
-#else
-#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x2_t __s1 = __p1; \
-  uint32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \
-})
-#else
-#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x2_t __s1 = __p1; \
-  uint16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \
-})
-#else
-#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x2_t __s1 = __p1; \
-  int8x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \
-})
-#else
-#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x2_t __s1 = __p1; \
-  float32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \
-})
-#else
-#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x2_t __s1 = __p1; \
-  int32x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \
-})
-#else
-#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x2_t __s1 = __p1; \
-  int16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \
-})
-#else
-#define vst3_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \
-})
-#else
-#define vst3_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \
-})
-#else
-#define vst3q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \
-})
-#else
-#define vst3q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \
-})
-#else
-#define vst3q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \
-})
-#else
-#define vst3q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \
-})
-#else
-#define vst3q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \
-})
-#else
-#define vst3q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \
-})
-#else
-#define vst3q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \
-})
-#else
-#define vst3q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \
-})
-#else
-#define vst3q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \
-})
-#else
-#define vst3_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \
-})
-#else
-#define vst3_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \
-})
-#endif
-
-#define vst3_u64(__p0, __p1) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \
-})
-#else
-#define vst3_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_s8(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \
-})
-#else
-#define vst3_s8(__p0, __p1) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_f32(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \
-})
-#else
-#define vst3_f32(__p0, __p1) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_s32(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \
-})
-#else
-#define vst3_s32(__p0, __p1) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \
-})
-#endif
-
-#define vst3_s64(__p0, __p1) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3_s16(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \
-})
-#else
-#define vst3_s16(__p0, __p1) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \
-})
-#else
-#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x3_t __s1 = __p1; \
-  poly8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \
-})
-#else
-#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x3_t __s1 = __p1; \
-  poly16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \
-})
-#else
-#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x3_t __s1 = __p1; \
-  poly16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \
-})
-#else
-#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x3_t __s1 = __p1; \
-  uint32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \
-})
-#else
-#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x3_t __s1 = __p1; \
-  uint16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \
-})
-#else
-#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x3_t __s1 = __p1; \
-  float32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \
-})
-#else
-#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x3_t __s1 = __p1; \
-  int32x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \
-})
-#else
-#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x3_t __s1 = __p1; \
-  int16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \
-})
-#else
-#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x3_t __s1 = __p1; \
-  uint8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \
-})
-#else
-#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x3_t __s1 = __p1; \
-  uint32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \
-})
-#else
-#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x3_t __s1 = __p1; \
-  uint16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \
-})
-#else
-#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x3_t __s1 = __p1; \
-  int8x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \
-})
-#else
-#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x3_t __s1 = __p1; \
-  float32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \
-})
-#else
-#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x3_t __s1 = __p1; \
-  int32x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \
-})
-#else
-#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x3_t __s1 = __p1; \
-  int16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \
-})
-#else
-#define vst4_p8(__p0, __p1) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \
-})
-#else
-#define vst4_p16(__p0, __p1) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \
-})
-#else
-#define vst4q_p8(__p0, __p1) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \
-})
-#else
-#define vst4q_p16(__p0, __p1) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \
-})
-#else
-#define vst4q_u8(__p0, __p1) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \
-})
-#else
-#define vst4q_u32(__p0, __p1) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \
-})
-#else
-#define vst4q_u16(__p0, __p1) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \
-})
-#else
-#define vst4q_s8(__p0, __p1) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \
-})
-#else
-#define vst4q_f32(__p0, __p1) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \
-})
-#else
-#define vst4q_s32(__p0, __p1) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \
-})
-#else
-#define vst4q_s16(__p0, __p1) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \
-})
-#else
-#define vst4_u8(__p0, __p1) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \
-})
-#else
-#define vst4_u32(__p0, __p1) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \
-})
-#endif
-
-#define vst4_u64(__p0, __p1) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \
-})
-#else
-#define vst4_u16(__p0, __p1) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_s8(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \
-})
-#else
-#define vst4_s8(__p0, __p1) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_f32(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \
-})
-#else
-#define vst4_f32(__p0, __p1) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_s32(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \
-})
-#else
-#define vst4_s32(__p0, __p1) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \
-})
-#endif
-
-#define vst4_s64(__p0, __p1) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4_s16(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \
-})
-#else
-#define vst4_s16(__p0, __p1) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \
-})
-#else
-#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x8x4_t __s1 = __p1; \
-  poly8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \
-})
-#else
-#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x4x4_t __s1 = __p1; \
-  poly16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \
-})
-#else
-#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \
-  poly16x8x4_t __s1 = __p1; \
-  poly16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \
-})
-#else
-#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x4x4_t __s1 = __p1; \
-  uint32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \
-})
-#else
-#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x8x4_t __s1 = __p1; \
-  uint16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \
-})
-#else
-#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x4x4_t __s1 = __p1; \
-  float32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \
-})
-#else
-#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4x4_t __s1 = __p1; \
-  int32x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \
-})
-#else
-#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8x4_t __s1 = __p1; \
-  int16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \
-})
-#else
-#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x8x4_t __s1 = __p1; \
-  uint8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \
-})
-#else
-#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \
-  uint32x2x4_t __s1 = __p1; \
-  uint32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \
-})
-#else
-#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \
-  uint16x4x4_t __s1 = __p1; \
-  uint16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \
-})
-#else
-#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x8x4_t __s1 = __p1; \
-  int8x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \
-})
-#else
-#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \
-  float32x2x4_t __s1 = __p1; \
-  float32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \
-})
-#else
-#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2x4_t __s1 = __p1; \
-  int32x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \
-})
-#else
-#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4x4_t __s1 = __p1; \
-  int16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_u8(__p0) - vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_u32(__p0) - vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_u16(__p0) - vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_s8(__p0) - vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_s32(__p0) - vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_s16(__p0) - vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 - vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 - vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 - vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 - vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) {
-  poly8x8x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) {
-  int8x8x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) {
-  poly8x8x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) {
-  uint8x8x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) {
-  int8x8x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) {
-  poly8x8x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) {
-  uint8x8x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) {
-  int8x8x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37);
-  return __ret;
-}
-#else
-__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#if !defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0_122, __p1_122) __extension__ ({ \
-  float16x4_t __s0_122 = __p0_122; \
-  float16x8_t __ret_122; \
-  __ret_122 = splatq_lane_f16(__s0_122, __p1_122); \
-  __ret_122; \
-})
-#else
-#define vdupq_lane_f16(__p0_123, __p1_123) __extension__ ({ \
-  float16x4_t __s0_123 = __p0_123; \
-  float16x4_t __rev0_123;  __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 3, 2, 1, 0); \
-  float16x8_t __ret_123; \
-  __ret_123 = __noswap_splatq_lane_f16(__rev0_123, __p1_123); \
-  __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_123; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0_124, __p1_124) __extension__ ({ \
-  float16x4_t __s0_124 = __p0_124; \
-  float16x4_t __ret_124; \
-  __ret_124 = splat_lane_f16(__s0_124, __p1_124); \
-  __ret_124; \
-})
-#else
-#define vdup_lane_f16(__p0_125, __p1_125) __extension__ ({ \
-  float16x4_t __s0_125 = __p0_125; \
-  float16x4_t __rev0_125;  __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \
-  float16x4_t __ret_125; \
-  __ret_125 = __noswap_splat_lane_f16(__rev0_125, __p1_125); \
-  __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \
-  __ret_125; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdupq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vdup_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmovq_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret; \
-})
-#else
-#define vmov_n_f16(__p0) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s32(__p0_126, __p1_126, __p2_126) __extension__ ({ \
-  int32x4_t __s0_126 = __p0_126; \
-  int32x2_t __s1_126 = __p1_126; \
-  int32x4_t __ret_126; \
-  __ret_126 = vqdmulhq_s32(__s0_126, splatq_lane_s32(__s1_126, __p2_126)); \
-  __ret_126; \
-})
-#else
-#define vqdmulhq_lane_s32(__p0_127, __p1_127, __p2_127) __extension__ ({ \
-  int32x4_t __s0_127 = __p0_127; \
-  int32x2_t __s1_127 = __p1_127; \
-  int32x4_t __rev0_127;  __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 3, 2, 1, 0); \
-  int32x2_t __rev1_127;  __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 1, 0); \
-  int32x4_t __ret_127; \
-  __ret_127 = __noswap_vqdmulhq_s32(__rev0_127, __noswap_splatq_lane_s32(__rev1_127, __p2_127)); \
-  __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0); \
-  __ret_127; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s16(__p0_128, __p1_128, __p2_128) __extension__ ({ \
-  int16x8_t __s0_128 = __p0_128; \
-  int16x4_t __s1_128 = __p1_128; \
-  int16x8_t __ret_128; \
-  __ret_128 = vqdmulhq_s16(__s0_128, splatq_lane_s16(__s1_128, __p2_128)); \
-  __ret_128; \
-})
-#else
-#define vqdmulhq_lane_s16(__p0_129, __p1_129, __p2_129) __extension__ ({ \
-  int16x8_t __s0_129 = __p0_129; \
-  int16x4_t __s1_129 = __p1_129; \
-  int16x8_t __rev0_129;  __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_129;  __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \
-  int16x8_t __ret_129; \
-  __ret_129 = __noswap_vqdmulhq_s16(__rev0_129, __noswap_splatq_lane_s16(__rev1_129, __p2_129)); \
-  __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_129; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s32(__p0_130, __p1_130, __p2_130) __extension__ ({ \
-  int32x2_t __s0_130 = __p0_130; \
-  int32x2_t __s1_130 = __p1_130; \
-  int32x2_t __ret_130; \
-  __ret_130 = vqdmulh_s32(__s0_130, splat_lane_s32(__s1_130, __p2_130)); \
-  __ret_130; \
-})
-#else
-#define vqdmulh_lane_s32(__p0_131, __p1_131, __p2_131) __extension__ ({ \
-  int32x2_t __s0_131 = __p0_131; \
-  int32x2_t __s1_131 = __p1_131; \
-  int32x2_t __rev0_131;  __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 1, 0); \
-  int32x2_t __rev1_131;  __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 1, 0); \
-  int32x2_t __ret_131; \
-  __ret_131 = __noswap_vqdmulh_s32(__rev0_131, __noswap_splat_lane_s32(__rev1_131, __p2_131)); \
-  __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 1, 0); \
-  __ret_131; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s16(__p0_132, __p1_132, __p2_132) __extension__ ({ \
-  int16x4_t __s0_132 = __p0_132; \
-  int16x4_t __s1_132 = __p1_132; \
-  int16x4_t __ret_132; \
-  __ret_132 = vqdmulh_s16(__s0_132, splat_lane_s16(__s1_132, __p2_132)); \
-  __ret_132; \
-})
-#else
-#define vqdmulh_lane_s16(__p0_133, __p1_133, __p2_133) __extension__ ({ \
-  int16x4_t __s0_133 = __p0_133; \
-  int16x4_t __s1_133 = __p1_133; \
-  int16x4_t __rev0_133;  __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 3, 2, 1, 0); \
-  int16x4_t __rev1_133;  __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \
-  int16x4_t __ret_133; \
-  __ret_133 = __noswap_vqdmulh_s16(__rev0_133, __noswap_splat_lane_s16(__rev1_133, __p2_133)); \
-  __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 3, 2, 1, 0); \
-  __ret_133; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s32(__p0_134, __p1_134, __p2_134) __extension__ ({ \
-  int32x4_t __s0_134 = __p0_134; \
-  int32x2_t __s1_134 = __p1_134; \
-  int32x4_t __ret_134; \
-  __ret_134 = vqrdmulhq_s32(__s0_134, splatq_lane_s32(__s1_134, __p2_134)); \
-  __ret_134; \
-})
-#else
-#define vqrdmulhq_lane_s32(__p0_135, __p1_135, __p2_135) __extension__ ({ \
-  int32x4_t __s0_135 = __p0_135; \
-  int32x2_t __s1_135 = __p1_135; \
-  int32x4_t __rev0_135;  __rev0_135 = __builtin_shufflevector(__s0_135, __s0_135, 3, 2, 1, 0); \
-  int32x2_t __rev1_135;  __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, 1, 0); \
-  int32x4_t __ret_135; \
-  __ret_135 = __noswap_vqrdmulhq_s32(__rev0_135, __noswap_splatq_lane_s32(__rev1_135, __p2_135)); \
-  __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); \
-  __ret_135; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s16(__p0_136, __p1_136, __p2_136) __extension__ ({ \
-  int16x8_t __s0_136 = __p0_136; \
-  int16x4_t __s1_136 = __p1_136; \
-  int16x8_t __ret_136; \
-  __ret_136 = vqrdmulhq_s16(__s0_136, splatq_lane_s16(__s1_136, __p2_136)); \
-  __ret_136; \
-})
-#else
-#define vqrdmulhq_lane_s16(__p0_137, __p1_137, __p2_137) __extension__ ({ \
-  int16x8_t __s0_137 = __p0_137; \
-  int16x4_t __s1_137 = __p1_137; \
-  int16x8_t __rev0_137;  __rev0_137 = __builtin_shufflevector(__s0_137, __s0_137, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_137;  __rev1_137 = __builtin_shufflevector(__s1_137, __s1_137, 3, 2, 1, 0); \
-  int16x8_t __ret_137; \
-  __ret_137 = __noswap_vqrdmulhq_s16(__rev0_137, __noswap_splatq_lane_s16(__rev1_137, __p2_137)); \
-  __ret_137 = __builtin_shufflevector(__ret_137, __ret_137, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_137; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s32(__p0_138, __p1_138, __p2_138) __extension__ ({ \
-  int32x2_t __s0_138 = __p0_138; \
-  int32x2_t __s1_138 = __p1_138; \
-  int32x2_t __ret_138; \
-  __ret_138 = vqrdmulh_s32(__s0_138, splat_lane_s32(__s1_138, __p2_138)); \
-  __ret_138; \
-})
-#else
-#define vqrdmulh_lane_s32(__p0_139, __p1_139, __p2_139) __extension__ ({ \
-  int32x2_t __s0_139 = __p0_139; \
-  int32x2_t __s1_139 = __p1_139; \
-  int32x2_t __rev0_139;  __rev0_139 = __builtin_shufflevector(__s0_139, __s0_139, 1, 0); \
-  int32x2_t __rev1_139;  __rev1_139 = __builtin_shufflevector(__s1_139, __s1_139, 1, 0); \
-  int32x2_t __ret_139; \
-  __ret_139 = __noswap_vqrdmulh_s32(__rev0_139, __noswap_splat_lane_s32(__rev1_139, __p2_139)); \
-  __ret_139 = __builtin_shufflevector(__ret_139, __ret_139, 1, 0); \
-  __ret_139; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s16(__p0_140, __p1_140, __p2_140) __extension__ ({ \
-  int16x4_t __s0_140 = __p0_140; \
-  int16x4_t __s1_140 = __p1_140; \
-  int16x4_t __ret_140; \
-  __ret_140 = vqrdmulh_s16(__s0_140, splat_lane_s16(__s1_140, __p2_140)); \
-  __ret_140; \
-})
-#else
-#define vqrdmulh_lane_s16(__p0_141, __p1_141, __p2_141) __extension__ ({ \
-  int16x4_t __s0_141 = __p0_141; \
-  int16x4_t __s1_141 = __p1_141; \
-  int16x4_t __rev0_141;  __rev0_141 = __builtin_shufflevector(__s0_141, __s0_141, 3, 2, 1, 0); \
-  int16x4_t __rev1_141;  __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 3, 2, 1, 0); \
-  int16x4_t __ret_141; \
-  __ret_141 = __noswap_vqrdmulh_s16(__rev0_141, __noswap_splat_lane_s16(__rev1_141, __p2_141)); \
-  __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, 3, 2, 1, 0); \
-  __ret_141; \
-})
-#endif
-
-__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if (__ARM_FP & 2)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_v(__p0, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_v(__p0, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f16(__p0) __extension__ ({ \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_dup_v(__p0, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_dup_f16(__p0) __extension__ ({ \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_dup_v(__p0, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
-  __ret; \
-})
-#else
-#define vld1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16_x2(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16_x2(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16_x2(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16_x2(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16_x3(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16_x3(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16_x3(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16_x3(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f16_x4(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld1q_f16_x4(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_f16_x4(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld1_f16_x4(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld2q_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld2_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f16(__p0) __extension__ ({ \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld2_dup_f16(__p0) __extension__ ({ \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \
-  __ret; \
-})
-#else
-#define vld2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  float16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld3q_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld3_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f16(__p0) __extension__ ({ \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld3_dup_f16(__p0) __extension__ ({ \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \
-  __ret; \
-})
-#else
-#define vld3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  float16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld4q_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld4_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f16(__p0) __extension__ ({ \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
-  __ret; \
-})
-#else
-#define vld4_dup_f16(__p0) __extension__ ({ \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \
-  __ret; \
-})
-#else
-#define vld4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  float16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
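/* Note on the vld4 family above: vld4_f16 performs a de-interleaving load of
 * four 4-element structures, vld4_dup_f16 loads one structure and broadcasts
 * it to every lane, and vld4_lane_f16 loads one structure into lane __p2 of
 * the vectors passed via __p1.  An illustrative sketch with a hypothetical
 * interleaved buffer:
 *
 *   float16_t src[16];                 // x0 y0 z0 w0  x1 y1 z1 w1 ...
 *   float16x4x4_t q = vld4_f16(src);
 *   // q.val[0] = {x0, x1, x2, x3}, q.val[1] = {y0, y1, y2, y3}, ...
 */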
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 40); \
-})
-#else
-#define vst1q_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 8); \
-})
-#else
-#define vst1_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 40); \
-})
-#else
-#define vst1q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 8); \
-})
-#else
-#define vst1_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \
-})
-#else
-#define vst1q_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \
-})
-#else
-#define vst1_f16_x2(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \
-})
-#else
-#define vst1q_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \
-})
-#else
-#define vst1_f16_x3(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \
-})
-#else
-#define vst1q_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \
-})
-#else
-#define vst1_f16_x4(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \
-})
-#endif
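/* Note: the _x2/_x3/_x4 forms of vst1 above store two to four vectors to
 * consecutive memory without interleaving; they are the store counterparts
 * of the vld1 _x2/_x3/_x4 loads, not of vst2/vst3/vst4 below, which write
 * one element from each vector per structure. */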
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 40); \
-})
-#else
-#define vst2q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_f16(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 8); \
-})
-#else
-#define vst2_f16(__p0, __p1) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 40); \
-})
-#else
-#define vst2q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x2_t __s1 = __p1; \
-  float16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 8); \
-})
-#else
-#define vst2_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x2_t __s1 = __p1; \
-  float16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 40); \
-})
-#else
-#define vst3q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_f16(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 8); \
-})
-#else
-#define vst3_f16(__p0, __p1) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 40); \
-})
-#else
-#define vst3q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x3_t __s1 = __p1; \
-  float16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 8); \
-})
-#else
-#define vst3_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x3_t __s1 = __p1; \
-  float16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 40); \
-})
-#else
-#define vst4q_f16(__p0, __p1) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_f16(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 8); \
-})
-#else
-#define vst4_f16(__p0, __p1) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 8); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 40); \
-})
-#else
-#define vst4q_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8x4_t __s1 = __p1; \
-  float16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 40); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 8); \
-})
-#else
-#define vst4_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4x4_t __s1 = __p1; \
-  float16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 8); \
-})
-#endif
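/* Note: vst2/vst3/vst4 are the interleaving inverses of the corresponding
 * loads.  A round-trip sketch with hypothetical buffers:
 *
 *   float16_t src[16], dst[16];
 *   float16x4x4_t q = vld4_f16(src);   // de-interleave
 *   vst4_f16(dst, q);                  // re-interleave: dst matches src
 */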
-
-#endif
-#if __ARM_ARCH >= 8
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
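/* Note: the vcvt{a,m,n,p} conversions above differ only in rounding
 * direction: A = to nearest, ties away from zero; M = toward minus
 * infinity; N = to nearest, ties to even; P = toward plus infinity.
 * Illustrative values, assuming x = vdup_n_f32(2.5f):
 *
 *   vcvta_s32_f32(x);  // {3, 3}
 *   vcvtm_s32_f32(x);  // {2, 2}
 *   vcvtn_s32_f32(x);  // {2, 2}  (2.5 ties to the even value 2)
 *   vcvtp_s32_f32(x);  // {3, 3}
 */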
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_CRYPTO)
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaeseq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaesimcq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesimcq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vaesmcq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vaesmcq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__p0, __p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__rev0, __p1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint32_t vsha1h_u32(uint32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0);
-  return __ret;
-}
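/* Note: scalar intrinsics such as vsha1h_u32 here (and vrndns_f32 later in
 * this section) operate on a single value, so lane order is irrelevant and
 * one definition serves both endiannesses. */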
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__p0, __p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__rev0, __p1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__p0, __p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__rev0, __p1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha1su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256hq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256h2q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su0q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsha256su1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
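/* Note: the __ARM_FEATURE_CRYPTO block above wraps the ARMv8 cryptography
 * extension.  vaeseq/vaesdq perform one AES encrypt/decrypt round step
 * (AddRoundKey, then SubBytes and ShiftRows), vaesmcq/vaesimcq the forward
 * and inverse MixColumns, and the vsha1 and vsha256 intrinsics the SHA-1 and
 * SHA-256 hash-update and message-schedule operations.  A full software AES
 * encryption round is conventionally composed as (assumed 128-bit state st
 * and round key rk):
 *
 *   uint8x16_t out = vaesmcq_u8(vaeseq_u8(st, rk));
 */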
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnd_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnd_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndaq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndaq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrnda_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrnda_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndiq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndiq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndi_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndi_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndmq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndmq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndm_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndm_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndnq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndnq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndn_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndn_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float32_t vrndns_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrndns_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndpq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndpq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndp_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndp_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vrndxq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vrndxq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vrndx_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vrndx_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
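/* Note: the vrnd family above maps onto the C99 rounding functions:
 * vrnd = trunc (toward zero), vrnda = round (ties away from zero),
 * vrndi = nearbyint (current mode, no inexact), vrndm = floor,
 * vrndn = nearest with ties to even, vrndp = ceil, and vrndx = rint
 * (current mode, may raise inexact).  Illustrative values, assuming
 * x = vdup_n_f32(-1.5f):
 *
 *   vrnd_f32(x);    // {-1, -1}
 *   vrnda_f32(x);   // {-2, -2}
 *   vrndm_f32(x);   // {-2, -2}
 *   vrndn_f32(x);   // {-2, -2}  (ties to the even value -2)
 *   vrndp_f32(x);   // {-1, -1}
 */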
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrnd_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrnd_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndaq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndaq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrnda_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrnda_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndmq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndmq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndm_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndm_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndnq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndnq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndn_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndn_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndpq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndpq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndp_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndp_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndxq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndxq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndx_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndx_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
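/* Note: the block above repeats the directed-rounding set for float16
 * vectors when __ARM_FEATURE_FP16_VECTOR_ARITHMETIC is also available;
 * vrndi has no f16 form in this header. */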
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
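/* Note: vmaxnm/vminnm implement the IEEE 754-2008 maxNum/minNum semantics
 * of the __ARM_FEATURE_NUMERIC_MAXMIN extension: when exactly one operand
 * is a quiet NaN, the numeric operand is returned rather than the NaN,
 * unlike vmax/vmin, which propagate the NaN. */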
-#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrnd_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndaq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrnda_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndiq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndi_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndmq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndm_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndnq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndn_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndpq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndp_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrndxq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrndx_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#endif
-#if __ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16) && !defined(__aarch64__)
-__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16) && defined(__aarch64__)
-__ai poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t)(__p0);
-  return __ret;
-}
-__ai poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t)(__p0);
-  return __ret;
-}
-__ai poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) {
-  poly16x4_t __ret;
-  __ret = (poly16x4_t)(__p0);
-  return __ret;
-}
-__ai poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t)(__p0);
-  return __ret;
-}
-__ai poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) {
-  poly128_t __ret;
-  __ret = (poly128_t)(__p0);
-  return __ret;
-}
-__ai poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t)(__p0);
-  return __ret;
-}
-__ai poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) {
-  poly16x8_t __ret;
-  __ret = (poly16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t)(__p0);
-  return __ret;
-}
-__ai uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__p0);
-  return __ret;
-}
-__ai uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0);
-  return __ret;
-}
-__ai uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0);
-  return __ret;
-}
-__ai int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t)(__p0);
-  return __ret;
-}
-__ai float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t)(__p0);
-  return __ret;
-}
-__ai float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t)(__p0);
-  return __ret;
-}
-__ai float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t)(__p0);
-  return __ret;
-}
-__ai int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__p0);
-  return __ret;
-}
-__ai int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__p0);
-  return __ret;
-}
-__ai int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__p0);
-  return __ret;
-}
-__ai uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t)(__p0);
-  return __ret;
-}
-__ai uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t)(__p0);
-  return __ret;
-}
-__ai uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0);
-  return __ret;
-}
-__ai uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0);
-  return __ret;
-}
-__ai int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t)(__p0);
-  return __ret;
-}
-__ai float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t)(__p0);
-  return __ret;
-}
-__ai float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t)(__p0);
-  return __ret;
-}
-__ai float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t)(__p0);
-  return __ret;
-}
-__ai int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) {
-  int32x2_t __ret;
-  __ret = (int32x2_t)(__p0);
-  return __ret;
-}
-__ai int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t)(__p0);
-  return __ret;
-}
-__ai int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-__ai bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t)(__p0);
-  return __ret;
-}
-#endif
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#else
-#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#else
-#define splat_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 11); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#else
-#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#else
-#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 43); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vbfdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdotq_lane_f32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \
-  float32x4_t __s0_142 = __p0_142; \
-  bfloat16x8_t __s1_142 = __p1_142; \
-  bfloat16x4_t __s2_142 = __p2_142; \
-  float32x4_t __ret_142; \
-bfloat16x4_t __reint_142 = __s2_142; \
-float32x4_t __reint1_142 = splatq_lane_f32(*(float32x2_t *) &__reint_142, __p3_142); \
-  __ret_142 = vbfdotq_f32(__s0_142, __s1_142, *(bfloat16x8_t *) &__reint1_142); \
-  __ret_142; \
-})
-#else
-#define vbfdotq_lane_f32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \
-  float32x4_t __s0_143 = __p0_143; \
-  bfloat16x8_t __s1_143 = __p1_143; \
-  bfloat16x4_t __s2_143 = __p2_143; \
-  float32x4_t __rev0_143;  __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_143;  __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_143;  __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, 3, 2, 1, 0); \
-  float32x4_t __ret_143; \
-bfloat16x4_t __reint_143 = __rev2_143; \
-float32x4_t __reint1_143 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_143, __p3_143); \
-  __ret_143 = __noswap_vbfdotq_f32(__rev0_143, __rev1_143, *(bfloat16x8_t *) &__reint1_143); \
-  __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, 3, 2, 1, 0); \
-  __ret_143; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdot_lane_f32(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \
-  float32x2_t __s0_144 = __p0_144; \
-  bfloat16x4_t __s1_144 = __p1_144; \
-  bfloat16x4_t __s2_144 = __p2_144; \
-  float32x2_t __ret_144; \
-bfloat16x4_t __reint_144 = __s2_144; \
-float32x2_t __reint1_144 = splat_lane_f32(*(float32x2_t *) &__reint_144, __p3_144); \
-  __ret_144 = vbfdot_f32(__s0_144, __s1_144, *(bfloat16x4_t *) &__reint1_144); \
-  __ret_144; \
-})
-#else
-#define vbfdot_lane_f32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \
-  float32x2_t __s0_145 = __p0_145; \
-  bfloat16x4_t __s1_145 = __p1_145; \
-  bfloat16x4_t __s2_145 = __p2_145; \
-  float32x2_t __rev0_145;  __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, 1, 0); \
-  bfloat16x4_t __rev1_145;  __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_145;  __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, 3, 2, 1, 0); \
-  float32x2_t __ret_145; \
-bfloat16x4_t __reint_145 = __rev2_145; \
-float32x2_t __reint1_145 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_145, __p3_145); \
-  __ret_145 = __noswap_vbfdot_f32(__rev0_145, __rev1_145, *(bfloat16x4_t *) &__reint1_145); \
-  __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, 1, 0); \
-  __ret_145; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdotq_laneq_f32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \
-  float32x4_t __s0_146 = __p0_146; \
-  bfloat16x8_t __s1_146 = __p1_146; \
-  bfloat16x8_t __s2_146 = __p2_146; \
-  float32x4_t __ret_146; \
-bfloat16x8_t __reint_146 = __s2_146; \
-float32x4_t __reint1_146 = splatq_laneq_f32(*(float32x4_t *) &__reint_146, __p3_146); \
-  __ret_146 = vbfdotq_f32(__s0_146, __s1_146, *(bfloat16x8_t *) &__reint1_146); \
-  __ret_146; \
-})
-#else
-#define vbfdotq_laneq_f32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \
-  float32x4_t __s0_147 = __p0_147; \
-  bfloat16x8_t __s1_147 = __p1_147; \
-  bfloat16x8_t __s2_147 = __p2_147; \
-  float32x4_t __rev0_147;  __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_147;  __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_147;  __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_147; \
-bfloat16x8_t __reint_147 = __rev2_147; \
-float32x4_t __reint1_147 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_147, __p3_147); \
-  __ret_147 = __noswap_vbfdotq_f32(__rev0_147, __rev1_147, *(bfloat16x8_t *) &__reint1_147); \
-  __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, 3, 2, 1, 0); \
-  __ret_147; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfdot_laneq_f32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \
-  float32x2_t __s0_148 = __p0_148; \
-  bfloat16x4_t __s1_148 = __p1_148; \
-  bfloat16x8_t __s2_148 = __p2_148; \
-  float32x2_t __ret_148; \
-bfloat16x8_t __reint_148 = __s2_148; \
-float32x2_t __reint1_148 = splat_laneq_f32(*(float32x4_t *) &__reint_148, __p3_148); \
-  __ret_148 = vbfdot_f32(__s0_148, __s1_148, *(bfloat16x4_t *) &__reint1_148); \
-  __ret_148; \
-})
-#else
-#define vbfdot_laneq_f32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \
-  float32x2_t __s0_149 = __p0_149; \
-  bfloat16x4_t __s1_149 = __p1_149; \
-  bfloat16x8_t __s2_149 = __p2_149; \
-  float32x2_t __rev0_149;  __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, 1, 0); \
-  bfloat16x4_t __rev1_149;  __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_149;  __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_149; \
-bfloat16x8_t __reint_149 = __rev2_149; \
-float32x2_t __reint1_149 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_149, __p3_149); \
-  __ret_149 = __noswap_vbfdot_f32(__rev0_149, __rev1_149, *(bfloat16x4_t *) &__reint1_149); \
-  __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, 1, 0); \
-  __ret_149; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlalbq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmlaltq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vbfmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#define vcreate_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (bfloat16x4_t)(__promote); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_150) {
-  float32x4_t __ret_150;
-bfloat16x4_t __reint_150 = __p0_150;
-int32x4_t __reint1_150 = vshll_n_s16(*(int16x4_t *) &__reint_150, 16);
-  __ret_150 = *(float32x4_t *) &__reint1_150;
-  return __ret_150;
-}
-#else
-__ai float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_151) {
-  bfloat16x4_t __rev0_151;  __rev0_151 = __builtin_shufflevector(__p0_151, __p0_151, 3, 2, 1, 0);
-  float32x4_t __ret_151;
-bfloat16x4_t __reint_151 = __rev0_151;
-int32x4_t __reint1_151 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_151, 16);
-  __ret_151 = *(float32x4_t *) &__reint1_151;
-  __ret_151 = __builtin_shufflevector(__ret_151, __ret_151, 3, 2, 1, 0);
-  return __ret_151;
-}
-__ai float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_152) {
-  float32x4_t __ret_152;
-bfloat16x4_t __reint_152 = __p0_152;
-int32x4_t __reint1_152 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_152, 16);
-  __ret_152 = *(float32x4_t *) &__reint1_152;
-  return __ret_152;
-}
-#endif
-
-__ai float32_t vcvtah_f32_bf16(bfloat16_t __p0) {
-  float32_t __ret;
-bfloat16_t __reint = __p0;
-int32_t __reint1 = *(int32_t *) &__reint << 16;
-  __ret = *(float32_t *) &__reint1;
-  return __ret;
-}
-__ai bfloat16_t vcvth_bf16_f32(float32_t __p0) {
-  bfloat16_t __ret;
-  __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_bf16(__p0_153, __p1_153) __extension__ ({ \
-  bfloat16x4_t __s0_153 = __p0_153; \
-  bfloat16x8_t __ret_153; \
-  __ret_153 = splatq_lane_bf16(__s0_153, __p1_153); \
-  __ret_153; \
-})
-#else
-#define vdupq_lane_bf16(__p0_154, __p1_154) __extension__ ({ \
-  bfloat16x4_t __s0_154 = __p0_154; \
-  bfloat16x4_t __rev0_154;  __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_154; \
-  __ret_154 = __noswap_splatq_lane_bf16(__rev0_154, __p1_154); \
-  __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_154; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_bf16(__p0_155, __p1_155) __extension__ ({ \
-  bfloat16x4_t __s0_155 = __p0_155; \
-  bfloat16x4_t __ret_155; \
-  __ret_155 = splat_lane_bf16(__s0_155, __p1_155); \
-  __ret_155; \
-})
-#else
-#define vdup_lane_bf16(__p0_156, __p1_156) __extension__ ({ \
-  bfloat16x4_t __s0_156 = __p0_156; \
-  bfloat16x4_t __rev0_156;  __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_156; \
-  __ret_156 = __noswap_splat_lane_bf16(__rev0_156, __p1_156); \
-  __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \
-  __ret_156; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_bf16(__p0_157, __p1_157) __extension__ ({ \
-  bfloat16x8_t __s0_157 = __p0_157; \
-  bfloat16x8_t __ret_157; \
-  __ret_157 = splatq_laneq_bf16(__s0_157, __p1_157); \
-  __ret_157; \
-})
-#else
-#define vdupq_laneq_bf16(__p0_158, __p1_158) __extension__ ({ \
-  bfloat16x8_t __s0_158 = __p0_158; \
-  bfloat16x8_t __rev0_158;  __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_158; \
-  __ret_158 = __noswap_splatq_laneq_bf16(__rev0_158, __p1_158); \
-  __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_158; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_bf16(__p0_159, __p1_159) __extension__ ({ \
-  bfloat16x8_t __s0_159 = __p0_159; \
-  bfloat16x4_t __ret_159; \
-  __ret_159 = splat_laneq_bf16(__s0_159, __p1_159); \
-  __ret_159; \
-})
-#else
-#define vdup_laneq_bf16(__p0_160, __p1_160) __extension__ ({ \
-  bfloat16x8_t __s0_160 = __p0_160; \
-  bfloat16x8_t __rev0_160;  __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_160; \
-  __ret_160 = __noswap_splat_laneq_bf16(__rev0_160, __p1_160); \
-  __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 3, 2, 1, 0); \
-  __ret_160; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vget_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s0 = __p0; \
-  bfloat16_t __ret; \
-  __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_v(__p0, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_v(__p0, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_v(__p0, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_v(__p0, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
-  __ret; \
-})
-#else
-#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
-  __ret; \
-})
-#else
-#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16_x2(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16_x3(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld1q_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld1_bf16_x4(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld2q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld2_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld2q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld2_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
-  __ret; \
-})
-#else
-#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  bfloat16x4x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld3q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld3_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld3q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld3_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
-  __ret; \
-})
-#else
-#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  bfloat16x4x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld4q_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld4_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
-  __ret; \
-})
-#else
-#define vld4q_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
-  __ret; \
-})
-#else
-#define vld4_dup_bf16(__p0) __extension__ ({ \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
-  __ret; \
-})
-#else
-#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
-  __ret; \
-})
-#else
-#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  bfloat16x4x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __ret; \
-  __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16_t __s0 = __p0; \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __ret; \
-  __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 43); \
-})
-#else
-#define vst1q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 11); \
-})
-#else
-#define vst1_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 43); \
-})
-#else
-#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8_t __s1 = __p1; \
-  bfloat16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 11); \
-})
-#else
-#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4_t __s1 = __p1; \
-  bfloat16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
-})
-#else
-#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
-})
-#else
-#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
-})
-#else
-#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
-})
-#else
-#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
-})
-#else
-#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
-})
-#else
-#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \
-})
-#else
-#define vst2q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \
-})
-#else
-#define vst2_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \
-})
-#else
-#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x2_t __s1 = __p1; \
-  bfloat16x8x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \
-})
-#else
-#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x2_t __s1 = __p1; \
-  bfloat16x4x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \
-})
-#else
-#define vst3q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \
-})
-#else
-#define vst3_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \
-})
-#else
-#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x3_t __s1 = __p1; \
-  bfloat16x8x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \
-})
-#else
-#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x3_t __s1 = __p1; \
-  bfloat16x4x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \
-})
-#else
-#define vst4q_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \
-})
-#else
-#define vst4_bf16(__p0, __p1) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \
-})
-#else
-#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x8x4_t __s1 = __p1; \
-  bfloat16x8x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \
-})
-#else
-#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \
-  bfloat16x4x4_t __s1 = __p1; \
-  bfloat16x4x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && !defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__rev0, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_v((int8x16_t)__p0, 11);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = __a32_vcvt_bf16_f32(__p0);
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __noswap___a32_vcvt_bf16_f32(__rev0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0));
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0));
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__rev0, 43);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_v((int8x16_t)__p0, 43);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_bf16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \
-  bfloat16x8_t __s0_161 = __p0_161; \
-  bfloat16x4_t __s2_161 = __p2_161; \
-  bfloat16x8_t __ret_161; \
-  __ret_161 = vsetq_lane_bf16(vget_lane_bf16(__s2_161, __p3_161), __s0_161, __p1_161); \
-  __ret_161; \
-})
-#else
-#define vcopyq_lane_bf16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \
-  bfloat16x8_t __s0_162 = __p0_162; \
-  bfloat16x4_t __s2_162 = __p2_162; \
-  bfloat16x8_t __rev0_162;  __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_162;  __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_162; \
-  __ret_162 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_162, __p3_162), __rev0_162, __p1_162); \
-  __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_162; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_bf16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \
-  bfloat16x4_t __s0_163 = __p0_163; \
-  bfloat16x4_t __s2_163 = __p2_163; \
-  bfloat16x4_t __ret_163; \
-  __ret_163 = vset_lane_bf16(vget_lane_bf16(__s2_163, __p3_163), __s0_163, __p1_163); \
-  __ret_163; \
-})
-#else
-#define vcopy_lane_bf16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \
-  bfloat16x4_t __s0_164 = __p0_164; \
-  bfloat16x4_t __s2_164 = __p2_164; \
-  bfloat16x4_t __rev0_164;  __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_164;  __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_164; \
-  __ret_164 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_164, __p3_164), __rev0_164, __p1_164); \
-  __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 3, 2, 1, 0); \
-  __ret_164; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_bf16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \
-  bfloat16x8_t __s0_165 = __p0_165; \
-  bfloat16x8_t __s2_165 = __p2_165; \
-  bfloat16x8_t __ret_165; \
-  __ret_165 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_165, __p3_165), __s0_165, __p1_165); \
-  __ret_165; \
-})
-#else
-#define vcopyq_laneq_bf16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \
-  bfloat16x8_t __s0_166 = __p0_166; \
-  bfloat16x8_t __s2_166 = __p2_166; \
-  bfloat16x8_t __rev0_166;  __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_166;  __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __ret_166; \
-  __ret_166 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_166, __p3_166), __rev0_166, __p1_166); \
-  __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_166; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_bf16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \
-  bfloat16x4_t __s0_167 = __p0_167; \
-  bfloat16x8_t __s2_167 = __p2_167; \
-  bfloat16x4_t __ret_167; \
-  __ret_167 = vset_lane_bf16(vgetq_lane_bf16(__s2_167, __p3_167), __s0_167, __p1_167); \
-  __ret_167; \
-})
-#else
-#define vcopy_laneq_bf16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \
-  bfloat16x4_t __s0_168 = __p0_168; \
-  bfloat16x8_t __s2_168 = __p2_168; \
-  bfloat16x4_t __rev0_168;  __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_168;  __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __ret_168; \
-  __ret_168 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_168, __p3_168), __rev0_168, __p1_168); \
-  __ret_168 = __builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \
-  __ret_168; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  bfloat16x4_t __ret;
-  __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0));
-  return __ret;
-}
-#else
-__ai bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x4_t __ret;
-  __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__p0, (int8x16_t)__p1, 43);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_v((int8x16_t)__rev0, (int8x16_t)__rev1, 43);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  bfloat16x8_t __ret;
-  __ret = __a64_vcvtq_low_bf16_f32(__p0);
-  return __ret;
-}
-#else
-__ai bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  bfloat16x8_t __ret;
-  __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot270_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcadd_rot90_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_COMPLEX) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_DOTPROD)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_u32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \
-  uint32x4_t __s0_169 = __p0_169; \
-  uint8x16_t __s1_169 = __p1_169; \
-  uint8x8_t __s2_169 = __p2_169; \
-  uint32x4_t __ret_169; \
-uint8x8_t __reint_169 = __s2_169; \
-uint32x4_t __reint1_169 = splatq_lane_u32(*(uint32x2_t *) &__reint_169, __p3_169); \
-  __ret_169 = vdotq_u32(__s0_169, __s1_169, *(uint8x16_t *) &__reint1_169); \
-  __ret_169; \
-})
-#else
-#define vdotq_lane_u32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \
-  uint32x4_t __s0_170 = __p0_170; \
-  uint8x16_t __s1_170 = __p1_170; \
-  uint8x8_t __s2_170 = __p2_170; \
-  uint32x4_t __rev0_170;  __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 3, 2, 1, 0); \
-  uint8x16_t __rev1_170;  __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_170;  __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_170; \
-uint8x8_t __reint_170 = __rev2_170; \
-uint32x4_t __reint1_170 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_170, __p3_170); \
-  __ret_170 = __noswap_vdotq_u32(__rev0_170, __rev1_170, *(uint8x16_t *) &__reint1_170); \
-  __ret_170 = __builtin_shufflevector(__ret_170, __ret_170, 3, 2, 1, 0); \
-  __ret_170; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_lane_s32(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \
-  int32x4_t __s0_171 = __p0_171; \
-  int8x16_t __s1_171 = __p1_171; \
-  int8x8_t __s2_171 = __p2_171; \
-  int32x4_t __ret_171; \
-int8x8_t __reint_171 = __s2_171; \
-int32x4_t __reint1_171 = splatq_lane_s32(*(int32x2_t *) &__reint_171, __p3_171); \
-  __ret_171 = vdotq_s32(__s0_171, __s1_171, *(int8x16_t *) &__reint1_171); \
-  __ret_171; \
-})
-#else
-#define vdotq_lane_s32(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \
-  int32x4_t __s0_172 = __p0_172; \
-  int8x16_t __s1_172 = __p1_172; \
-  int8x8_t __s2_172 = __p2_172; \
-  int32x4_t __rev0_172;  __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 3, 2, 1, 0); \
-  int8x16_t __rev1_172;  __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_172;  __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_172; \
-int8x8_t __reint_172 = __rev2_172; \
-int32x4_t __reint1_172 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_172, __p3_172); \
-  __ret_172 = __noswap_vdotq_s32(__rev0_172, __rev1_172, *(int8x16_t *) &__reint1_172); \
-  __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 3, 2, 1, 0); \
-  __ret_172; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_u32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \
-  uint32x2_t __s0_173 = __p0_173; \
-  uint8x8_t __s1_173 = __p1_173; \
-  uint8x8_t __s2_173 = __p2_173; \
-  uint32x2_t __ret_173; \
-uint8x8_t __reint_173 = __s2_173; \
-uint32x2_t __reint1_173 = splat_lane_u32(*(uint32x2_t *) &__reint_173, __p3_173); \
-  __ret_173 = vdot_u32(__s0_173, __s1_173, *(uint8x8_t *) &__reint1_173); \
-  __ret_173; \
-})
-#else
-#define vdot_lane_u32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \
-  uint32x2_t __s0_174 = __p0_174; \
-  uint8x8_t __s1_174 = __p1_174; \
-  uint8x8_t __s2_174 = __p2_174; \
-  uint32x2_t __rev0_174;  __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \
-  uint8x8_t __rev1_174;  __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_174;  __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret_174; \
-uint8x8_t __reint_174 = __rev2_174; \
-uint32x2_t __reint1_174 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_174, __p3_174); \
-  __ret_174 = __noswap_vdot_u32(__rev0_174, __rev1_174, *(uint8x8_t *) &__reint1_174); \
-  __ret_174 = __builtin_shufflevector(__ret_174, __ret_174, 1, 0); \
-  __ret_174; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_lane_s32(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \
-  int32x2_t __s0_175 = __p0_175; \
-  int8x8_t __s1_175 = __p1_175; \
-  int8x8_t __s2_175 = __p2_175; \
-  int32x2_t __ret_175; \
-int8x8_t __reint_175 = __s2_175; \
-int32x2_t __reint1_175 = splat_lane_s32(*(int32x2_t *) &__reint_175, __p3_175); \
-  __ret_175 = vdot_s32(__s0_175, __s1_175, *(int8x8_t *) &__reint1_175); \
-  __ret_175; \
-})
-#else
-#define vdot_lane_s32(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \
-  int32x2_t __s0_176 = __p0_176; \
-  int8x8_t __s1_176 = __p1_176; \
-  int8x8_t __s2_176 = __p2_176; \
-  int32x2_t __rev0_176;  __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 1, 0); \
-  int8x8_t __rev1_176;  __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_176;  __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_176; \
-int8x8_t __reint_176 = __rev2_176; \
-int32x2_t __reint1_176 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_176, __p3_176); \
-  __ret_176 = __noswap_vdot_s32(__rev0_176, __rev1_176, *(int8x8_t *) &__reint1_176); \
-  __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 1, 0); \
-  __ret_176; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_u32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \
-  uint32x4_t __s0_177 = __p0_177; \
-  uint8x16_t __s1_177 = __p1_177; \
-  uint8x16_t __s2_177 = __p2_177; \
-  uint32x4_t __ret_177; \
-uint8x16_t __reint_177 = __s2_177; \
-uint32x4_t __reint1_177 = splatq_laneq_u32(*(uint32x4_t *) &__reint_177, __p3_177); \
-  __ret_177 = vdotq_u32(__s0_177, __s1_177, *(uint8x16_t *) &__reint1_177); \
-  __ret_177; \
-})
-#else
-#define vdotq_laneq_u32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \
-  uint32x4_t __s0_178 = __p0_178; \
-  uint8x16_t __s1_178 = __p1_178; \
-  uint8x16_t __s2_178 = __p2_178; \
-  uint32x4_t __rev0_178;  __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 3, 2, 1, 0); \
-  uint8x16_t __rev1_178;  __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_178;  __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_178; \
-uint8x16_t __reint_178 = __rev2_178; \
-uint32x4_t __reint1_178 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_178, __p3_178); \
-  __ret_178 = __noswap_vdotq_u32(__rev0_178, __rev1_178, *(uint8x16_t *) &__reint1_178); \
-  __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 3, 2, 1, 0); \
-  __ret_178; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdotq_laneq_s32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \
-  int32x4_t __s0_179 = __p0_179; \
-  int8x16_t __s1_179 = __p1_179; \
-  int8x16_t __s2_179 = __p2_179; \
-  int32x4_t __ret_179; \
-int8x16_t __reint_179 = __s2_179; \
-int32x4_t __reint1_179 = splatq_laneq_s32(*(int32x4_t *) &__reint_179, __p3_179); \
-  __ret_179 = vdotq_s32(__s0_179, __s1_179, *(int8x16_t *) &__reint1_179); \
-  __ret_179; \
-})
-#else
-#define vdotq_laneq_s32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \
-  int32x4_t __s0_180 = __p0_180; \
-  int8x16_t __s1_180 = __p1_180; \
-  int8x16_t __s2_180 = __p2_180; \
-  int32x4_t __rev0_180;  __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \
-  int8x16_t __rev1_180;  __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_180;  __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_180; \
-int8x16_t __reint_180 = __rev2_180; \
-int32x4_t __reint1_180 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_180, __p3_180); \
-  __ret_180 = __noswap_vdotq_s32(__rev0_180, __rev1_180, *(int8x16_t *) &__reint1_180); \
-  __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \
-  __ret_180; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_u32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \
-  uint32x2_t __s0_181 = __p0_181; \
-  uint8x8_t __s1_181 = __p1_181; \
-  uint8x16_t __s2_181 = __p2_181; \
-  uint32x2_t __ret_181; \
-uint8x16_t __reint_181 = __s2_181; \
-uint32x2_t __reint1_181 = splat_laneq_u32(*(uint32x4_t *) &__reint_181, __p3_181); \
-  __ret_181 = vdot_u32(__s0_181, __s1_181, *(uint8x8_t *) &__reint1_181); \
-  __ret_181; \
-})
-#else
-#define vdot_laneq_u32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \
-  uint32x2_t __s0_182 = __p0_182; \
-  uint8x8_t __s1_182 = __p1_182; \
-  uint8x16_t __s2_182 = __p2_182; \
-  uint32x2_t __rev0_182;  __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \
-  uint8x8_t __rev1_182;  __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_182;  __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x2_t __ret_182; \
-uint8x16_t __reint_182 = __rev2_182; \
-uint32x2_t __reint1_182 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_182, __p3_182); \
-  __ret_182 = __noswap_vdot_u32(__rev0_182, __rev1_182, *(uint8x8_t *) &__reint1_182); \
-  __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \
-  __ret_182; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdot_laneq_s32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \
-  int32x2_t __s0_183 = __p0_183; \
-  int8x8_t __s1_183 = __p1_183; \
-  int8x16_t __s2_183 = __p2_183; \
-  int32x2_t __ret_183; \
-int8x16_t __reint_183 = __s2_183; \
-int32x2_t __reint1_183 = splat_laneq_s32(*(int32x4_t *) &__reint_183, __p3_183); \
-  __ret_183 = vdot_s32(__s0_183, __s1_183, *(int8x8_t *) &__reint1_183); \
-  __ret_183; \
-})
-#else
-#define vdot_laneq_s32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \
-  int32x2_t __s0_184 = __p0_184; \
-  int8x8_t __s1_184 = __p1_184; \
-  int8x16_t __s2_184 = __p2_184; \
-  int32x2_t __rev0_184;  __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 1, 0); \
-  int8x8_t __rev1_184;  __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_184;  __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_184; \
-int8x16_t __reint_184 = __rev2_184; \
-int32x2_t __reint1_184 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_184, __p3_184); \
-  __ret_184 = __noswap_vdot_s32(__rev0_184, __rev1_184, *(int8x8_t *) &__reint1_184); \
-  __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 1, 0); \
-  __ret_184; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FMA)
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, __rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, __rev1, (float32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16FML) && defined(__aarch64__)
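-/*
- * FP16 fused multiply-long: vfmlal/vfmlsl widen pairs of float16 elements,
- * multiply them, and accumulate into (or subtract from) float32 lanes. The
- * _low forms consume the lower half of each float16 source, the _high
- * forms the upper half.
- */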
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlalq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlal_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_high_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_high_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vfmlslq_low_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vfmlsl_low_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
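-/*
- * FP16 vector arithmetic: element-wise absolute difference (vabd), absolute
- * value (vabs), add/multiply, bitwise select (vbsl), comparisons, and
- * conversions. The comparison intrinsics return uint16 lane masks: all ones
- * where the predicate holds, all zeros elsewhere; vcage/vcagt/vcale/vcalt
- * compare absolute values.
- */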
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vabsq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vabs_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgezq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgez_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgtz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclezq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclez_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclez_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltzq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcltz_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai float16x8_t vcvtq_f16_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai float16x8_t vcvtq_f16_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vcvtq_f16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai float16x4_t vcvt_f16_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vcvt_f16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
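-/*
- * Fixed-point conversions: the vcvt*_n_* forms convert between float16 and
- * 16-bit fixed-point values with __p1 fractional bits. They are macros
- * rather than inline functions because __p1 must be an integer constant
- * expression.
- */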
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__s0, __p1, 33); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_v((int8x16_t)__rev0, __p1, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__s0, __p1, 1); \
-  __ret; \
-})
-#else
-#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_v((int8x8_t)__rev0, __p1, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__s0, __p1, 49); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret; \
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_v((int8x16_t)__rev0, __p1, 49); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__s0, __p1, 17); \
-  __ret; \
-})
-#else
-#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16x4_t __ret; \
-  __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_v((int8x8_t)__rev0, __p1, 17); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvt_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvt_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvt_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvt_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtaq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvta_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvta_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvta_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvta_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtmq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtm_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtm_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtm_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtnq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtn_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtn_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtn_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__p0, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vcvtpq_s16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_v((int8x16_t)__rev0, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__p0, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vcvtp_s16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vcvtp_s16_v((int8x8_t)__rev0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcvtp_u16_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
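-/*
- * vext/vextq concatenate the two sources and extract a contiguous window of
- * elements starting at index __p2; implemented as macros because the start
- * index must be an integer constant expression.
- */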
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \
-  __ret; \
-})
-#else
-#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \
-  __ret; \
-})
-#else
-#define vext_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __ret;
-  __ret = vfmaq_f16(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __ret;
-  __ret = vfma_f16(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f16(__p0_185, __p1_185, __p2_185) __extension__ ({ \
-  float16x8_t __s0_185 = __p0_185; \
-  float16x4_t __s1_185 = __p1_185; \
-  float16x8_t __ret_185; \
-  __ret_185 = __s0_185 * splatq_lane_f16(__s1_185, __p2_185); \
-  __ret_185; \
-})
-#else
-#define vmulq_lane_f16(__p0_186, __p1_186, __p2_186) __extension__ ({ \
-  float16x8_t __s0_186 = __p0_186; \
-  float16x4_t __s1_186 = __p1_186; \
-  float16x8_t __rev0_186;  __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1_186;  __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 3, 2, 1, 0); \
-  float16x8_t __ret_186; \
-  __ret_186 = __rev0_186 * __noswap_splatq_lane_f16(__rev1_186, __p2_186); \
-  __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_186; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_lane_f16(__p0_187, __p1_187, __p2_187) __extension__ ({ \
-  float16x4_t __s0_187 = __p0_187; \
-  float16x4_t __s1_187 = __p1_187; \
-  float16x4_t __ret_187; \
-  __ret_187 = __s0_187 * splat_lane_f16(__s1_187, __p2_187); \
-  __ret_187; \
-})
-#else
-#define vmul_lane_f16(__p0_188, __p1_188, __p2_188) __extension__ ({ \
-  float16x4_t __s0_188 = __p0_188; \
-  float16x4_t __s1_188 = __p1_188; \
-  float16x4_t __rev0_188;  __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \
-  float16x4_t __rev1_188;  __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \
-  float16x4_t __ret_188; \
-  __ret_188 = __rev0_188 * __noswap_splat_lane_f16(__rev1_188, __p2_188); \
-  __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \
-  __ret_188; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
-  __ret; \
-})
-#else
-#define vmulq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
-  __ret; \
-})
-#else
-#define vmul_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vnegq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float16x8_t vnegq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vneg_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float16x4_t vneg_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrecpeq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrecpe_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4);
-  return __ret;
-}
-#else
-__ai float16x8_t vrev64q_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrev64_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  return __ret;
-}
-#else
-__ai float16x4_t vrev64_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrsqrteq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrsqrte_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8x2_t __ret;
-  __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4x2_t __ret;
-  __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0);
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
-  __ret; \
-})
-#else
-#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x8_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = (float16x4_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsh_lane_f16(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \
-  float16_t __s0_189 = __p0_189; \
-  float16_t __s1_189 = __p1_189; \
-  float16x4_t __s2_189 = __p2_189; \
-  float16_t __ret_189; \
-  __ret_189 = vfmah_lane_f16(__s0_189, -__s1_189, __s2_189, __p3_189); \
-  __ret_189; \
-})
-#else
-#define vfmsh_lane_f16(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \
-  float16_t __s0_190 = __p0_190; \
-  float16_t __s1_190 = __p1_190; \
-  float16x4_t __s2_190 = __p2_190; \
-  float16x4_t __rev2_190;  __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \
-  float16_t __ret_190; \
-  __ret_190 = __noswap_vfmah_lane_f16(__s0_190, -__s1_190, __rev2_190, __p3_190); \
-  __ret_190; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f16(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \
-  float16x8_t __s0_191 = __p0_191; \
-  float16x8_t __s1_191 = __p1_191; \
-  float16x4_t __s2_191 = __p2_191; \
-  float16x8_t __ret_191; \
-  __ret_191 = vfmaq_lane_f16(__s0_191, -__s1_191, __s2_191, __p3_191); \
-  __ret_191; \
-})
-#else
-#define vfmsq_lane_f16(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \
-  float16x8_t __s0_192 = __p0_192; \
-  float16x8_t __s1_192 = __p1_192; \
-  float16x4_t __s2_192 = __p2_192; \
-  float16x8_t __rev0_192;  __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_192;  __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_192;  __rev2_192 = __builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \
-  float16x8_t __ret_192; \
-  __ret_192 = __noswap_vfmaq_lane_f16(__rev0_192, -__rev1_192, __rev2_192, __p3_192); \
-  __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_192; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f16(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \
-  float16x4_t __s0_193 = __p0_193; \
-  float16x4_t __s1_193 = __p1_193; \
-  float16x4_t __s2_193 = __p2_193; \
-  float16x4_t __ret_193; \
-  __ret_193 = vfma_lane_f16(__s0_193, -__s1_193, __s2_193, __p3_193); \
-  __ret_193; \
-})
-#else
-#define vfms_lane_f16(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \
-  float16x4_t __s0_194 = __p0_194; \
-  float16x4_t __s1_194 = __p1_194; \
-  float16x4_t __s2_194 = __p2_194; \
-  float16x4_t __rev0_194;  __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 3, 2, 1, 0); \
-  float16x4_t __rev1_194;  __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 3, 2, 1, 0); \
-  float16x4_t __rev2_194;  __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 3, 2, 1, 0); \
-  float16x4_t __ret_194; \
-  __ret_194 = __noswap_vfma_lane_f16(__rev0_194, -__rev1_194, __rev2_194, __p3_194); \
-  __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 3, 2, 1, 0); \
-  __ret_194; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsh_laneq_f16(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \
-  float16_t __s0_195 = __p0_195; \
-  float16_t __s1_195 = __p1_195; \
-  float16x8_t __s2_195 = __p2_195; \
-  float16_t __ret_195; \
-  __ret_195 = vfmah_laneq_f16(__s0_195, -__s1_195, __s2_195, __p3_195); \
-  __ret_195; \
-})
-#else
-#define vfmsh_laneq_f16(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \
-  float16_t __s0_196 = __p0_196; \
-  float16_t __s1_196 = __p1_196; \
-  float16x8_t __s2_196 = __p2_196; \
-  float16x8_t __rev2_196;  __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_196; \
-  __ret_196 = __noswap_vfmah_laneq_f16(__s0_196, -__s1_196, __rev2_196, __p3_196); \
-  __ret_196; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f16(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \
-  float16x8_t __s0_197 = __p0_197; \
-  float16x8_t __s1_197 = __p1_197; \
-  float16x8_t __s2_197 = __p2_197; \
-  float16x8_t __ret_197; \
-  __ret_197 = vfmaq_laneq_f16(__s0_197, -__s1_197, __s2_197, __p3_197); \
-  __ret_197; \
-})
-#else
-#define vfmsq_laneq_f16(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \
-  float16x8_t __s0_198 = __p0_198; \
-  float16x8_t __s1_198 = __p1_198; \
-  float16x8_t __s2_198 = __p2_198; \
-  float16x8_t __rev0_198;  __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_198;  __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_198;  __rev2_198 = __builtin_shufflevector(__s2_198, __s2_198, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_198; \
-  __ret_198 = __noswap_vfmaq_laneq_f16(__rev0_198, -__rev1_198, __rev2_198, __p3_198); \
-  __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_198; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f16(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \
-  float16x4_t __s0_199 = __p0_199; \
-  float16x4_t __s1_199 = __p1_199; \
-  float16x8_t __s2_199 = __p2_199; \
-  float16x4_t __ret_199; \
-  __ret_199 = vfma_laneq_f16(__s0_199, -__s1_199, __s2_199, __p3_199); \
-  __ret_199; \
-})
-#else
-#define vfms_laneq_f16(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \
-  float16x4_t __s0_200 = __p0_200; \
-  float16x4_t __s1_200 = __p1_200; \
-  float16x8_t __s2_200 = __p2_200; \
-  float16x4_t __rev0_200;  __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \
-  float16x4_t __rev1_200;  __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \
-  float16x8_t __rev2_200;  __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_200; \
-  __ret_200 = __noswap_vfma_laneq_f16(__rev0_200, -__rev1_200, __rev2_200, __p3_200); \
-  __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \
-  __ret_200; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __ret; \
-  __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __ret; \
-  __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret; \
-})
-#else
-#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __s2 = __p2; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmaxv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vmaxv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vminnmvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vminnmv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \
-  __ret; \
-})
-#else
-#define vminvq_f16(__p0) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vminv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \
-  __ret; \
-})
-#else
-#define vminv_f16(__p0) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f16(__p0_201, __p1_201, __p2_201) __extension__ ({ \
-  float16x8_t __s0_201 = __p0_201; \
-  float16x8_t __s1_201 = __p1_201; \
-  float16x8_t __ret_201; \
-  __ret_201 = __s0_201 * splatq_laneq_f16(__s1_201, __p2_201); \
-  __ret_201; \
-})
-#else
-#define vmulq_laneq_f16(__p0_202, __p1_202, __p2_202) __extension__ ({ \
-  float16x8_t __s0_202 = __p0_202; \
-  float16x8_t __s1_202 = __p1_202; \
-  float16x8_t __rev0_202;  __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_202;  __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_202; \
-  __ret_202 = __rev0_202 * __noswap_splatq_laneq_f16(__rev1_202, __p2_202); \
-  __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_202; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f16(__p0_203, __p1_203, __p2_203) __extension__ ({ \
-  float16x4_t __s0_203 = __p0_203; \
-  float16x8_t __s1_203 = __p1_203; \
-  float16x4_t __ret_203; \
-  __ret_203 = __s0_203 * splat_laneq_f16(__s1_203, __p2_203); \
-  __ret_203; \
-})
-#else
-#define vmul_laneq_f16(__p0_204, __p1_204, __p2_204) __extension__ ({ \
-  float16x4_t __s0_204 = __p0_204; \
-  float16x8_t __s1_204 = __p1_204; \
-  float16x4_t __rev0_204;  __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 3, 2, 1, 0); \
-  float16x8_t __rev1_204;  __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_204; \
-  __ret_204 = __rev0_204 * __noswap_splat_laneq_f16(__rev1_204, __p2_204); \
-  __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 3, 2, 1, 0); \
-  __ret_204; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x4_t __s1 = __p1; \
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f16(__p0_205, __p1_205, __p2_205) __extension__ ({ \
-  float16x8_t __s0_205 = __p0_205; \
-  float16x4_t __s1_205 = __p1_205; \
-  float16x8_t __ret_205; \
-  __ret_205 = vmulxq_f16(__s0_205, splatq_lane_f16(__s1_205, __p2_205)); \
-  __ret_205; \
-})
-#else
-#define vmulxq_lane_f16(__p0_206, __p1_206, __p2_206) __extension__ ({ \
-  float16x8_t __s0_206 = __p0_206; \
-  float16x4_t __s1_206 = __p1_206; \
-  float16x8_t __rev0_206;  __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev1_206;  __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 3, 2, 1, 0); \
-  float16x8_t __ret_206; \
-  __ret_206 = __noswap_vmulxq_f16(__rev0_206, __noswap_splatq_lane_f16(__rev1_206, __p2_206)); \
-  __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_206; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f16(__p0_207, __p1_207, __p2_207) __extension__ ({ \
-  float16x4_t __s0_207 = __p0_207; \
-  float16x4_t __s1_207 = __p1_207; \
-  float16x4_t __ret_207; \
-  __ret_207 = vmulx_f16(__s0_207, splat_lane_f16(__s1_207, __p2_207)); \
-  __ret_207; \
-})
-#else
-#define vmulx_lane_f16(__p0_208, __p1_208, __p2_208) __extension__ ({ \
-  float16x4_t __s0_208 = __p0_208; \
-  float16x4_t __s1_208 = __p1_208; \
-  float16x4_t __rev0_208;  __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 3, 2, 1, 0); \
-  float16x4_t __rev1_208;  __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 3, 2, 1, 0); \
-  float16x4_t __ret_208; \
-  __ret_208 = __noswap_vmulx_f16(__rev0_208, __noswap_splat_lane_f16(__rev1_208, __p2_208)); \
-  __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 3, 2, 1, 0); \
-  __ret_208; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \
-  float16_t __s0 = __p0; \
-  float16x8_t __s1 = __p1; \
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret; \
-  __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f16(__p0_209, __p1_209, __p2_209) __extension__ ({ \
-  float16x8_t __s0_209 = __p0_209; \
-  float16x8_t __s1_209 = __p1_209; \
-  float16x8_t __ret_209; \
-  __ret_209 = vmulxq_f16(__s0_209, splatq_laneq_f16(__s1_209, __p2_209)); \
-  __ret_209; \
-})
-#else
-#define vmulxq_laneq_f16(__p0_210, __p1_210, __p2_210) __extension__ ({ \
-  float16x8_t __s0_210 = __p0_210; \
-  float16x8_t __s1_210 = __p1_210; \
-  float16x8_t __rev0_210;  __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev1_210;  __rev1_210 = __builtin_shufflevector(__s1_210, __s1_210, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_210; \
-  __ret_210 = __noswap_vmulxq_f16(__rev0_210, __noswap_splatq_laneq_f16(__rev1_210, __p2_210)); \
-  __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_210; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f16(__p0_211, __p1_211, __p2_211) __extension__ ({ \
-  float16x4_t __s0_211 = __p0_211; \
-  float16x8_t __s1_211 = __p1_211; \
-  float16x4_t __ret_211; \
-  __ret_211 = vmulx_f16(__s0_211, splat_laneq_f16(__s1_211, __p2_211)); \
-  __ret_211; \
-})
-#else
-#define vmulx_laneq_f16(__p0_212, __p1_212, __p2_212) __extension__ ({ \
-  float16x4_t __s0_212 = __p0_212; \
-  float16x8_t __s1_212 = __p1_212; \
-  float16x4_t __rev0_212;  __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 3, 2, 1, 0); \
-  float16x8_t __rev1_212;  __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_212; \
-  __ret_212 = __noswap_vmulx_f16(__rev0_212, __noswap_splat_laneq_f16(__rev1_212, __p2_212)); \
-  __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 3, 2, 1, 0); \
-  __ret_212; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __ret; \
-  __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
-  __ret; \
-})
-#else
-#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \
-  float16x8_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret; \
-  __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __ret; \
-  __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
-  __ret; \
-})
-#else
-#define vmulx_n_f16(__p0, __p1) __extension__ ({ \
-  float16x4_t __s0 = __p0; \
-  float16_t __s1 = __p1; \
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float16x4_t __ret; \
-  __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vrndiq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vrndi_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vrndi_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 40);
-  return __ret;
-}
-#else
-__ai float16x8_t vsqrtq_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = (float16x8_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 40);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 8);
-  return __ret;
-}
-#else
-__ai float16x4_t vsqrt_f16(float16x4_t __p0) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = (float16x4_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 8);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_MATMUL_INT8)
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusdotq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vusdot_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdotq_lane_s32(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \
-  int32x4_t __s0_213 = __p0_213; \
-  uint8x16_t __s1_213 = __p1_213; \
-  int8x8_t __s2_213 = __p2_213; \
-  int32x4_t __ret_213; \
-int8x8_t __reint_213 = __s2_213; \
-  __ret_213 = vusdotq_s32(__s0_213, __s1_213, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_213, __p3_213))); \
-  __ret_213; \
-})
-#else
-#define vusdotq_lane_s32(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \
-  int32x4_t __s0_214 = __p0_214; \
-  uint8x16_t __s1_214 = __p1_214; \
-  int8x8_t __s2_214 = __p2_214; \
-  int32x4_t __rev0_214;  __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \
-  uint8x16_t __rev1_214;  __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_214;  __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_214; \
-int8x8_t __reint_214 = __rev2_214; \
-  __ret_214 = __noswap_vusdotq_s32(__rev0_214, __rev1_214, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_214, __p3_214))); \
-  __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \
-  __ret_214; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdot_lane_s32(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \
-  int32x2_t __s0_215 = __p0_215; \
-  uint8x8_t __s1_215 = __p1_215; \
-  int8x8_t __s2_215 = __p2_215; \
-  int32x2_t __ret_215; \
-int8x8_t __reint_215 = __s2_215; \
-  __ret_215 = vusdot_s32(__s0_215, __s1_215, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_215, __p3_215))); \
-  __ret_215; \
-})
-#else
-#define vusdot_lane_s32(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \
-  int32x2_t __s0_216 = __p0_216; \
-  uint8x8_t __s1_216 = __p1_216; \
-  int8x8_t __s2_216 = __p2_216; \
-  int32x2_t __rev0_216;  __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 1, 0); \
-  uint8x8_t __rev1_216;  __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_216;  __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_216; \
-int8x8_t __reint_216 = __rev2_216; \
-  __ret_216 = __noswap_vusdot_s32(__rev0_216, __rev1_216, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_216, __p3_216))); \
-  __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 1, 0); \
-  __ret_216; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vusmmlaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_QRDMX)
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = vqaddq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqaddq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = vqaddq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqaddq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = vqadd_s32(__p0, vqrdmulh_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqadd_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = vqadd_s16(__p0, vqrdmulh_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqadd_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s32(__p0_217, __p1_217, __p2_217, __p3_217) __extension__ ({ \
-  int32x4_t __s0_217 = __p0_217; \
-  int32x4_t __s1_217 = __p1_217; \
-  int32x2_t __s2_217 = __p2_217; \
-  int32x4_t __ret_217; \
-  __ret_217 = vqaddq_s32(__s0_217, vqrdmulhq_s32(__s1_217, splatq_lane_s32(__s2_217, __p3_217))); \
-  __ret_217; \
-})
-#else
-#define vqrdmlahq_lane_s32(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \
-  int32x4_t __s0_218 = __p0_218; \
-  int32x4_t __s1_218 = __p1_218; \
-  int32x2_t __s2_218 = __p2_218; \
-  int32x4_t __rev0_218;  __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \
-  int32x4_t __rev1_218;  __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \
-  int32x2_t __rev2_218;  __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 1, 0); \
-  int32x4_t __ret_218; \
-  __ret_218 = __noswap_vqaddq_s32(__rev0_218, __noswap_vqrdmulhq_s32(__rev1_218, __noswap_splatq_lane_s32(__rev2_218, __p3_218))); \
-  __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \
-  __ret_218; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_lane_s16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \
-  int16x8_t __s0_219 = __p0_219; \
-  int16x8_t __s1_219 = __p1_219; \
-  int16x4_t __s2_219 = __p2_219; \
-  int16x8_t __ret_219; \
-  __ret_219 = vqaddq_s16(__s0_219, vqrdmulhq_s16(__s1_219, splatq_lane_s16(__s2_219, __p3_219))); \
-  __ret_219; \
-})
-#else
-#define vqrdmlahq_lane_s16(__p0_220, __p1_220, __p2_220, __p3_220) __extension__ ({ \
-  int16x8_t __s0_220 = __p0_220; \
-  int16x8_t __s1_220 = __p1_220; \
-  int16x4_t __s2_220 = __p2_220; \
-  int16x8_t __rev0_220;  __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_220;  __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_220;  __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \
-  int16x8_t __ret_220; \
-  __ret_220 = __noswap_vqaddq_s16(__rev0_220, __noswap_vqrdmulhq_s16(__rev1_220, __noswap_splatq_lane_s16(__rev2_220, __p3_220))); \
-  __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_220; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s32(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \
-  int32x2_t __s0_221 = __p0_221; \
-  int32x2_t __s1_221 = __p1_221; \
-  int32x2_t __s2_221 = __p2_221; \
-  int32x2_t __ret_221; \
-  __ret_221 = vqadd_s32(__s0_221, vqrdmulh_s32(__s1_221, splat_lane_s32(__s2_221, __p3_221))); \
-  __ret_221; \
-})
-#else
-#define vqrdmlah_lane_s32(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \
-  int32x2_t __s0_222 = __p0_222; \
-  int32x2_t __s1_222 = __p1_222; \
-  int32x2_t __s2_222 = __p2_222; \
-  int32x2_t __rev0_222;  __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 1, 0); \
-  int32x2_t __rev1_222;  __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 1, 0); \
-  int32x2_t __rev2_222;  __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 1, 0); \
-  int32x2_t __ret_222; \
-  __ret_222 = __noswap_vqadd_s32(__rev0_222, __noswap_vqrdmulh_s32(__rev1_222, __noswap_splat_lane_s32(__rev2_222, __p3_222))); \
-  __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 1, 0); \
-  __ret_222; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_lane_s16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \
-  int16x4_t __s0_223 = __p0_223; \
-  int16x4_t __s1_223 = __p1_223; \
-  int16x4_t __s2_223 = __p2_223; \
-  int16x4_t __ret_223; \
-  __ret_223 = vqadd_s16(__s0_223, vqrdmulh_s16(__s1_223, splat_lane_s16(__s2_223, __p3_223))); \
-  __ret_223; \
-})
-#else
-#define vqrdmlah_lane_s16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \
-  int16x4_t __s0_224 = __p0_224; \
-  int16x4_t __s1_224 = __p1_224; \
-  int16x4_t __s2_224 = __p2_224; \
-  int16x4_t __rev0_224;  __rev0_224 = __builtin_shufflevector(__s0_224, __s0_224, 3, 2, 1, 0); \
-  int16x4_t __rev1_224;  __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 3, 2, 1, 0); \
-  int16x4_t __rev2_224;  __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 3, 2, 1, 0); \
-  int16x4_t __ret_224; \
-  __ret_224 = __noswap_vqadd_s16(__rev0_224, __noswap_vqrdmulh_s16(__rev1_224, __noswap_splat_lane_s16(__rev2_224, __p3_224))); \
-  __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 3, 2, 1, 0); \
-  __ret_224; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = vqsubq_s32(__p0, vqrdmulhq_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqsubq_s32(__rev0, __noswap_vqrdmulhq_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = vqsubq_s16(__p0, vqrdmulhq_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vqsubq_s16(__rev0, __noswap_vqrdmulhq_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = vqsub_s32(__p0, vqrdmulh_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __noswap_vqsub_s32(__rev0, __noswap_vqrdmulh_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = vqsub_s16(__p0, vqrdmulh_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __noswap_vqsub_s16(__rev0, __noswap_vqrdmulh_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s32(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \
-  int32x4_t __s0_225 = __p0_225; \
-  int32x4_t __s1_225 = __p1_225; \
-  int32x2_t __s2_225 = __p2_225; \
-  int32x4_t __ret_225; \
-  __ret_225 = vqsubq_s32(__s0_225, vqrdmulhq_s32(__s1_225, splatq_lane_s32(__s2_225, __p3_225))); \
-  __ret_225; \
-})
-#else
-#define vqrdmlshq_lane_s32(__p0_226, __p1_226, __p2_226, __p3_226) __extension__ ({ \
-  int32x4_t __s0_226 = __p0_226; \
-  int32x4_t __s1_226 = __p1_226; \
-  int32x2_t __s2_226 = __p2_226; \
-  int32x4_t __rev0_226;  __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \
-  int32x4_t __rev1_226;  __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \
-  int32x2_t __rev2_226;  __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 1, 0); \
-  int32x4_t __ret_226; \
-  __ret_226 = __noswap_vqsubq_s32(__rev0_226, __noswap_vqrdmulhq_s32(__rev1_226, __noswap_splatq_lane_s32(__rev2_226, __p3_226))); \
-  __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \
-  __ret_226; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_lane_s16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \
-  int16x8_t __s0_227 = __p0_227; \
-  int16x8_t __s1_227 = __p1_227; \
-  int16x4_t __s2_227 = __p2_227; \
-  int16x8_t __ret_227; \
-  __ret_227 = vqsubq_s16(__s0_227, vqrdmulhq_s16(__s1_227, splatq_lane_s16(__s2_227, __p3_227))); \
-  __ret_227; \
-})
-#else
-#define vqrdmlshq_lane_s16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \
-  int16x8_t __s0_228 = __p0_228; \
-  int16x8_t __s1_228 = __p1_228; \
-  int16x4_t __s2_228 = __p2_228; \
-  int16x8_t __rev0_228;  __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_228;  __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_228;  __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \
-  int16x8_t __ret_228; \
-  __ret_228 = __noswap_vqsubq_s16(__rev0_228, __noswap_vqrdmulhq_s16(__rev1_228, __noswap_splatq_lane_s16(__rev2_228, __p3_228))); \
-  __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_228; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s32(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \
-  int32x2_t __s0_229 = __p0_229; \
-  int32x2_t __s1_229 = __p1_229; \
-  int32x2_t __s2_229 = __p2_229; \
-  int32x2_t __ret_229; \
-  __ret_229 = vqsub_s32(__s0_229, vqrdmulh_s32(__s1_229, splat_lane_s32(__s2_229, __p3_229))); \
-  __ret_229; \
-})
-#else
-#define vqrdmlsh_lane_s32(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \
-  int32x2_t __s0_230 = __p0_230; \
-  int32x2_t __s1_230 = __p1_230; \
-  int32x2_t __s2_230 = __p2_230; \
-  int32x2_t __rev0_230;  __rev0_230 = __builtin_shufflevector(__s0_230, __s0_230, 1, 0); \
-  int32x2_t __rev1_230;  __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 1, 0); \
-  int32x2_t __rev2_230;  __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 1, 0); \
-  int32x2_t __ret_230; \
-  __ret_230 = __noswap_vqsub_s32(__rev0_230, __noswap_vqrdmulh_s32(__rev1_230, __noswap_splat_lane_s32(__rev2_230, __p3_230))); \
-  __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 1, 0); \
-  __ret_230; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_lane_s16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \
-  int16x4_t __s0_231 = __p0_231; \
-  int16x4_t __s1_231 = __p1_231; \
-  int16x4_t __s2_231 = __p2_231; \
-  int16x4_t __ret_231; \
-  __ret_231 = vqsub_s16(__s0_231, vqrdmulh_s16(__s1_231, splat_lane_s16(__s2_231, __p3_231))); \
-  __ret_231; \
-})
-#else
-#define vqrdmlsh_lane_s16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \
-  int16x4_t __s0_232 = __p0_232; \
-  int16x4_t __s1_232 = __p1_232; \
-  int16x4_t __s2_232 = __p2_232; \
-  int16x4_t __rev0_232;  __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 3, 2, 1, 0); \
-  int16x4_t __rev1_232;  __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 3, 2, 1, 0); \
-  int16x4_t __rev2_232;  __rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 3, 2, 1, 0); \
-  int16x4_t __ret_232; \
-  __ret_232 = __noswap_vqsub_s16(__rev0_232, __noswap_vqrdmulh_s16(__rev1_232, __noswap_splat_lane_s16(__rev2_232, __p3_232))); \
-  __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 3, 2, 1, 0); \
-  __ret_232; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s32(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \
-  int32x4_t __s0_233 = __p0_233; \
-  int32x4_t __s1_233 = __p1_233; \
-  int32x4_t __s2_233 = __p2_233; \
-  int32x4_t __ret_233; \
-  __ret_233 = vqaddq_s32(__s0_233, vqrdmulhq_s32(__s1_233, splatq_laneq_s32(__s2_233, __p3_233))); \
-  __ret_233; \
-})
-#else
-#define vqrdmlahq_laneq_s32(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \
-  int32x4_t __s0_234 = __p0_234; \
-  int32x4_t __s1_234 = __p1_234; \
-  int32x4_t __s2_234 = __p2_234; \
-  int32x4_t __rev0_234;  __rev0_234 = __builtin_shufflevector(__s0_234, __s0_234, 3, 2, 1, 0); \
-  int32x4_t __rev1_234;  __rev1_234 = __builtin_shufflevector(__s1_234, __s1_234, 3, 2, 1, 0); \
-  int32x4_t __rev2_234;  __rev2_234 = __builtin_shufflevector(__s2_234, __s2_234, 3, 2, 1, 0); \
-  int32x4_t __ret_234; \
-  __ret_234 = __noswap_vqaddq_s32(__rev0_234, __noswap_vqrdmulhq_s32(__rev1_234, __noswap_splatq_laneq_s32(__rev2_234, __p3_234))); \
-  __ret_234 = __builtin_shufflevector(__ret_234, __ret_234, 3, 2, 1, 0); \
-  __ret_234; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahq_laneq_s16(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \
-  int16x8_t __s0_235 = __p0_235; \
-  int16x8_t __s1_235 = __p1_235; \
-  int16x8_t __s2_235 = __p2_235; \
-  int16x8_t __ret_235; \
-  __ret_235 = vqaddq_s16(__s0_235, vqrdmulhq_s16(__s1_235, splatq_laneq_s16(__s2_235, __p3_235))); \
-  __ret_235; \
-})
-#else
-#define vqrdmlahq_laneq_s16(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \
-  int16x8_t __s0_236 = __p0_236; \
-  int16x8_t __s1_236 = __p1_236; \
-  int16x8_t __s2_236 = __p2_236; \
-  int16x8_t __rev0_236;  __rev0_236 = __builtin_shufflevector(__s0_236, __s0_236, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_236;  __rev1_236 = __builtin_shufflevector(__s1_236, __s1_236, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_236;  __rev2_236 = __builtin_shufflevector(__s2_236, __s2_236, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_236; \
-  __ret_236 = __noswap_vqaddq_s16(__rev0_236, __noswap_vqrdmulhq_s16(__rev1_236, __noswap_splatq_laneq_s16(__rev2_236, __p3_236))); \
-  __ret_236 = __builtin_shufflevector(__ret_236, __ret_236, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_236; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s32(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \
-  int32x2_t __s0_237 = __p0_237; \
-  int32x2_t __s1_237 = __p1_237; \
-  int32x4_t __s2_237 = __p2_237; \
-  int32x2_t __ret_237; \
-  __ret_237 = vqadd_s32(__s0_237, vqrdmulh_s32(__s1_237, splat_laneq_s32(__s2_237, __p3_237))); \
-  __ret_237; \
-})
-#else
-#define vqrdmlah_laneq_s32(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \
-  int32x2_t __s0_238 = __p0_238; \
-  int32x2_t __s1_238 = __p1_238; \
-  int32x4_t __s2_238 = __p2_238; \
-  int32x2_t __rev0_238;  __rev0_238 = __builtin_shufflevector(__s0_238, __s0_238, 1, 0); \
-  int32x2_t __rev1_238;  __rev1_238 = __builtin_shufflevector(__s1_238, __s1_238, 1, 0); \
-  int32x4_t __rev2_238;  __rev2_238 = __builtin_shufflevector(__s2_238, __s2_238, 3, 2, 1, 0); \
-  int32x2_t __ret_238; \
-  __ret_238 = __noswap_vqadd_s32(__rev0_238, __noswap_vqrdmulh_s32(__rev1_238, __noswap_splat_laneq_s32(__rev2_238, __p3_238))); \
-  __ret_238 = __builtin_shufflevector(__ret_238, __ret_238, 1, 0); \
-  __ret_238; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlah_laneq_s16(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \
-  int16x4_t __s0_239 = __p0_239; \
-  int16x4_t __s1_239 = __p1_239; \
-  int16x8_t __s2_239 = __p2_239; \
-  int16x4_t __ret_239; \
-  __ret_239 = vqadd_s16(__s0_239, vqrdmulh_s16(__s1_239, splat_laneq_s16(__s2_239, __p3_239))); \
-  __ret_239; \
-})
-#else
-#define vqrdmlah_laneq_s16(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \
-  int16x4_t __s0_240 = __p0_240; \
-  int16x4_t __s1_240 = __p1_240; \
-  int16x8_t __s2_240 = __p2_240; \
-  int16x4_t __rev0_240;  __rev0_240 = __builtin_shufflevector(__s0_240, __s0_240, 3, 2, 1, 0); \
-  int16x4_t __rev1_240;  __rev1_240 = __builtin_shufflevector(__s1_240, __s1_240, 3, 2, 1, 0); \
-  int16x8_t __rev2_240;  __rev2_240 = __builtin_shufflevector(__s2_240, __s2_240, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_240; \
-  __ret_240 = __noswap_vqadd_s16(__rev0_240, __noswap_vqrdmulh_s16(__rev1_240, __noswap_splat_laneq_s16(__rev2_240, __p3_240))); \
-  __ret_240 = __builtin_shufflevector(__ret_240, __ret_240, 3, 2, 1, 0); \
-  __ret_240; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s32(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \
-  int32x4_t __s0_241 = __p0_241; \
-  int32x4_t __s1_241 = __p1_241; \
-  int32x4_t __s2_241 = __p2_241; \
-  int32x4_t __ret_241; \
-  __ret_241 = vqsubq_s32(__s0_241, vqrdmulhq_s32(__s1_241, splatq_laneq_s32(__s2_241, __p3_241))); \
-  __ret_241; \
-})
-#else
-#define vqrdmlshq_laneq_s32(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \
-  int32x4_t __s0_242 = __p0_242; \
-  int32x4_t __s1_242 = __p1_242; \
-  int32x4_t __s2_242 = __p2_242; \
-  int32x4_t __rev0_242;  __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 3, 2, 1, 0); \
-  int32x4_t __rev1_242;  __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 3, 2, 1, 0); \
-  int32x4_t __rev2_242;  __rev2_242 = __builtin_shufflevector(__s2_242, __s2_242, 3, 2, 1, 0); \
-  int32x4_t __ret_242; \
-  __ret_242 = __noswap_vqsubq_s32(__rev0_242, __noswap_vqrdmulhq_s32(__rev1_242, __noswap_splatq_laneq_s32(__rev2_242, __p3_242))); \
-  __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 3, 2, 1, 0); \
-  __ret_242; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshq_laneq_s16(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \
-  int16x8_t __s0_243 = __p0_243; \
-  int16x8_t __s1_243 = __p1_243; \
-  int16x8_t __s2_243 = __p2_243; \
-  int16x8_t __ret_243; \
-  __ret_243 = vqsubq_s16(__s0_243, vqrdmulhq_s16(__s1_243, splatq_laneq_s16(__s2_243, __p3_243))); \
-  __ret_243; \
-})
-#else
-#define vqrdmlshq_laneq_s16(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \
-  int16x8_t __s0_244 = __p0_244; \
-  int16x8_t __s1_244 = __p1_244; \
-  int16x8_t __s2_244 = __p2_244; \
-  int16x8_t __rev0_244;  __rev0_244 = __builtin_shufflevector(__s0_244, __s0_244, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_244;  __rev1_244 = __builtin_shufflevector(__s1_244, __s1_244, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_244;  __rev2_244 = __builtin_shufflevector(__s2_244, __s2_244, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_244; \
-  __ret_244 = __noswap_vqsubq_s16(__rev0_244, __noswap_vqrdmulhq_s16(__rev1_244, __noswap_splatq_laneq_s16(__rev2_244, __p3_244))); \
-  __ret_244 = __builtin_shufflevector(__ret_244, __ret_244, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_244; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s32(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \
-  int32x2_t __s0_245 = __p0_245; \
-  int32x2_t __s1_245 = __p1_245; \
-  int32x4_t __s2_245 = __p2_245; \
-  int32x2_t __ret_245; \
-  __ret_245 = vqsub_s32(__s0_245, vqrdmulh_s32(__s1_245, splat_laneq_s32(__s2_245, __p3_245))); \
-  __ret_245; \
-})
-#else
-#define vqrdmlsh_laneq_s32(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \
-  int32x2_t __s0_246 = __p0_246; \
-  int32x2_t __s1_246 = __p1_246; \
-  int32x4_t __s2_246 = __p2_246; \
-  int32x2_t __rev0_246;  __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \
-  int32x2_t __rev1_246;  __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \
-  int32x4_t __rev2_246;  __rev2_246 = __builtin_shufflevector(__s2_246, __s2_246, 3, 2, 1, 0); \
-  int32x2_t __ret_246; \
-  __ret_246 = __noswap_vqsub_s32(__rev0_246, __noswap_vqrdmulh_s32(__rev1_246, __noswap_splat_laneq_s32(__rev2_246, __p3_246))); \
-  __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \
-  __ret_246; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlsh_laneq_s16(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \
-  int16x4_t __s0_247 = __p0_247; \
-  int16x4_t __s1_247 = __p1_247; \
-  int16x8_t __s2_247 = __p2_247; \
-  int16x4_t __ret_247; \
-  __ret_247 = vqsub_s16(__s0_247, vqrdmulh_s16(__s1_247, splat_laneq_s16(__s2_247, __p3_247))); \
-  __ret_247; \
-})
-#else
-#define vqrdmlsh_laneq_s16(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \
-  int16x4_t __s0_248 = __p0_248; \
-  int16x4_t __s1_248 = __p1_248; \
-  int16x8_t __s2_248 = __p2_248; \
-  int16x4_t __rev0_248;  __rev0_248 = __builtin_shufflevector(__s0_248, __s0_248, 3, 2, 1, 0); \
-  int16x4_t __rev1_248;  __rev1_248 = __builtin_shufflevector(__s1_248, __s1_248, 3, 2, 1, 0); \
-  int16x8_t __rev2_248;  __rev2_248 = __builtin_shufflevector(__s2_248, __s2_248, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_248; \
-  __ret_248 = __noswap_vqsub_s16(__rev0_248, __noswap_vqrdmulh_s16(__rev1_248, __noswap_splat_laneq_s16(__rev2_248, __p3_248))); \
-  __ret_248 = __builtin_shufflevector(__ret_248, __ret_248, 3, 2, 1, 0); \
-  __ret_248; \
-})
-#endif
-
-#endif
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vabsq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vabsq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabsq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vabsq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vabs_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai int64x1_t vabs_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int64_t vabsd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vabsd_s64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 + __p1;
-  return __ret;
-}
-__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vaddd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddlvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddlvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddlvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddlvq_s8(int8x16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddlvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddlvq_s32(int32x4_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddlvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddlvq_s16(int16x8_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddlvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddlv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddlv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddlv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddlv_s8(int8x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddlv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddlv_s32(int32x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddlv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddlv_s16(int16x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddlv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vaddvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vaddvq_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vaddvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vaddvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vaddvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vaddvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vaddvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vaddvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vaddvq_s64(int64x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vaddvq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vaddv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vaddv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vaddv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vaddv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vaddv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vaddv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vaddv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vaddv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vaddv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vaddv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vaddv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vaddv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vaddv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vaddv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38);
-  return __ret;
-}
-#else
-__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 == __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 == __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 == __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_p64(poly64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_p16(poly16x4_t __p0) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_p16(poly16x8_t __p0) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vceqzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vceqzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vceqzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vceqzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_u64(uint64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vceqz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vceqz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vceqz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vceqz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64_t vceqzd_u64(uint64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
-  return __ret;
-}
-__ai int64_t vceqzd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vceqzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vceqzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 >= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 >= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 >= __p1);
-  return __ret;
-}
-__ai int64_t vcged_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgezq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgezq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgezq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgezq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgezq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgezq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgez_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgez_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgez_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgez_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgez_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgez_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64_t vcgezd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcgezd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcgezs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 > __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 > __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 > __p1);
-  return __ret;
-}
-__ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcgts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcgtz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgtz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgtz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcgtz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcgtz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcgtz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64_t vcgtzd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcgtzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcgtzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 <= __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 <= __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 <= __p1);
-  return __ret;
-}
-__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vcled_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vcles_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vclezq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vclezq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclezq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vclezq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vclezq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vclezq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vclez_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vclez_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclez_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclez_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclez_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vclez_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vclez_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclez_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vclez_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vclez_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64_t vclezd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vclezd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vclezd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vclezs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__p0 < __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__rev0 < __rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t)(__p0 < __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vcltzq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltzq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltzq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vcltzq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcltzq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vcltzq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vcltz_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcltz_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcltz_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vcltz_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcltz_s64(int64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vcltz_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64_t vcltzd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0);
-  return __ret;
-}
-__ai uint64_t vcltzd_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcltzs_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  return __ret;
-}
-#else
-__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p8(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \
-  poly8x16_t __s0_249 = __p0_249; \
-  poly8x8_t __s2_249 = __p2_249; \
-  poly8x16_t __ret_249; \
-  __ret_249 = vsetq_lane_p8(vget_lane_p8(__s2_249, __p3_249), __s0_249, __p1_249); \
-  __ret_249; \
-})
-#else
-#define vcopyq_lane_p8(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \
-  poly8x16_t __s0_250 = __p0_250; \
-  poly8x8_t __s2_250 = __p2_250; \
-  poly8x16_t __rev0_250;  __rev0_250 = __builtin_shufflevector(__s0_250, __s0_250, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_250;  __rev2_250 = __builtin_shufflevector(__s2_250, __s2_250, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_250; \
-  __ret_250 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_250, __p3_250), __rev0_250, __p1_250); \
-  __ret_250 = __builtin_shufflevector(__ret_250, __ret_250, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_250; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p16(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \
-  poly16x8_t __s0_251 = __p0_251; \
-  poly16x4_t __s2_251 = __p2_251; \
-  poly16x8_t __ret_251; \
-  __ret_251 = vsetq_lane_p16(vget_lane_p16(__s2_251, __p3_251), __s0_251, __p1_251); \
-  __ret_251; \
-})
-#else
-#define vcopyq_lane_p16(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \
-  poly16x8_t __s0_252 = __p0_252; \
-  poly16x4_t __s2_252 = __p2_252; \
-  poly16x8_t __rev0_252;  __rev0_252 = __builtin_shufflevector(__s0_252, __s0_252, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __rev2_252;  __rev2_252 = __builtin_shufflevector(__s2_252, __s2_252, 3, 2, 1, 0); \
-  poly16x8_t __ret_252; \
-  __ret_252 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_252, __p3_252), __rev0_252, __p1_252); \
-  __ret_252 = __builtin_shufflevector(__ret_252, __ret_252, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_252; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u8(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \
-  uint8x16_t __s0_253 = __p0_253; \
-  uint8x8_t __s2_253 = __p2_253; \
-  uint8x16_t __ret_253; \
-  __ret_253 = vsetq_lane_u8(vget_lane_u8(__s2_253, __p3_253), __s0_253, __p1_253); \
-  __ret_253; \
-})
-#else
-#define vcopyq_lane_u8(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \
-  uint8x16_t __s0_254 = __p0_254; \
-  uint8x8_t __s2_254 = __p2_254; \
-  uint8x16_t __rev0_254;  __rev0_254 = __builtin_shufflevector(__s0_254, __s0_254, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_254;  __rev2_254 = __builtin_shufflevector(__s2_254, __s2_254, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_254; \
-  __ret_254 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_254, __p3_254), __rev0_254, __p1_254); \
-  __ret_254 = __builtin_shufflevector(__ret_254, __ret_254, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_254; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u32(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \
-  uint32x4_t __s0_255 = __p0_255; \
-  uint32x2_t __s2_255 = __p2_255; \
-  uint32x4_t __ret_255; \
-  __ret_255 = vsetq_lane_u32(vget_lane_u32(__s2_255, __p3_255), __s0_255, __p1_255); \
-  __ret_255; \
-})
-#else
-#define vcopyq_lane_u32(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \
-  uint32x4_t __s0_256 = __p0_256; \
-  uint32x2_t __s2_256 = __p2_256; \
-  uint32x4_t __rev0_256;  __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 3, 2, 1, 0); \
-  uint32x2_t __rev2_256;  __rev2_256 = __builtin_shufflevector(__s2_256, __s2_256, 1, 0); \
-  uint32x4_t __ret_256; \
-  __ret_256 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_256, __p3_256), __rev0_256, __p1_256); \
-  __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 3, 2, 1, 0); \
-  __ret_256; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u64(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \
-  uint64x2_t __s0_257 = __p0_257; \
-  uint64x1_t __s2_257 = __p2_257; \
-  uint64x2_t __ret_257; \
-  __ret_257 = vsetq_lane_u64(vget_lane_u64(__s2_257, __p3_257), __s0_257, __p1_257); \
-  __ret_257; \
-})
-#else
-#define vcopyq_lane_u64(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \
-  uint64x2_t __s0_258 = __p0_258; \
-  uint64x1_t __s2_258 = __p2_258; \
-  uint64x2_t __rev0_258;  __rev0_258 = __builtin_shufflevector(__s0_258, __s0_258, 1, 0); \
-  uint64x2_t __ret_258; \
-  __ret_258 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_258, __p3_258), __rev0_258, __p1_258); \
-  __ret_258 = __builtin_shufflevector(__ret_258, __ret_258, 1, 0); \
-  __ret_258; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_u16(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \
-  uint16x8_t __s0_259 = __p0_259; \
-  uint16x4_t __s2_259 = __p2_259; \
-  uint16x8_t __ret_259; \
-  __ret_259 = vsetq_lane_u16(vget_lane_u16(__s2_259, __p3_259), __s0_259, __p1_259); \
-  __ret_259; \
-})
-#else
-#define vcopyq_lane_u16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \
-  uint16x8_t __s0_260 = __p0_260; \
-  uint16x4_t __s2_260 = __p2_260; \
-  uint16x8_t __rev0_260;  __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_260;  __rev2_260 = __builtin_shufflevector(__s2_260, __s2_260, 3, 2, 1, 0); \
-  uint16x8_t __ret_260; \
-  __ret_260 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_260, __p3_260), __rev0_260, __p1_260); \
-  __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_260; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s8(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \
-  int8x16_t __s0_261 = __p0_261; \
-  int8x8_t __s2_261 = __p2_261; \
-  int8x16_t __ret_261; \
-  __ret_261 = vsetq_lane_s8(vget_lane_s8(__s2_261, __p3_261), __s0_261, __p1_261); \
-  __ret_261; \
-})
-#else
-#define vcopyq_lane_s8(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \
-  int8x16_t __s0_262 = __p0_262; \
-  int8x8_t __s2_262 = __p2_262; \
-  int8x16_t __rev0_262;  __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_262;  __rev2_262 = __builtin_shufflevector(__s2_262, __s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_262; \
-  __ret_262 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_262, __p3_262), __rev0_262, __p1_262); \
-  __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_262; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \
-  float32x4_t __s0_263 = __p0_263; \
-  float32x2_t __s2_263 = __p2_263; \
-  float32x4_t __ret_263; \
-  __ret_263 = vsetq_lane_f32(vget_lane_f32(__s2_263, __p3_263), __s0_263, __p1_263); \
-  __ret_263; \
-})
-#else
-#define vcopyq_lane_f32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \
-  float32x4_t __s0_264 = __p0_264; \
-  float32x2_t __s2_264 = __p2_264; \
-  float32x4_t __rev0_264;  __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \
-  float32x2_t __rev2_264;  __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 1, 0); \
-  float32x4_t __ret_264; \
-  __ret_264 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_264, __p3_264), __rev0_264, __p1_264); \
-  __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \
-  __ret_264; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s32(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \
-  int32x4_t __s0_265 = __p0_265; \
-  int32x2_t __s2_265 = __p2_265; \
-  int32x4_t __ret_265; \
-  __ret_265 = vsetq_lane_s32(vget_lane_s32(__s2_265, __p3_265), __s0_265, __p1_265); \
-  __ret_265; \
-})
-#else
-#define vcopyq_lane_s32(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \
-  int32x4_t __s0_266 = __p0_266; \
-  int32x2_t __s2_266 = __p2_266; \
-  int32x4_t __rev0_266;  __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 3, 2, 1, 0); \
-  int32x2_t __rev2_266;  __rev2_266 = __builtin_shufflevector(__s2_266, __s2_266, 1, 0); \
-  int32x4_t __ret_266; \
-  __ret_266 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_266, __p3_266), __rev0_266, __p1_266); \
-  __ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 3, 2, 1, 0); \
-  __ret_266; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s64(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \
-  int64x2_t __s0_267 = __p0_267; \
-  int64x1_t __s2_267 = __p2_267; \
-  int64x2_t __ret_267; \
-  __ret_267 = vsetq_lane_s64(vget_lane_s64(__s2_267, __p3_267), __s0_267, __p1_267); \
-  __ret_267; \
-})
-#else
-#define vcopyq_lane_s64(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \
-  int64x2_t __s0_268 = __p0_268; \
-  int64x1_t __s2_268 = __p2_268; \
-  int64x2_t __rev0_268;  __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 1, 0); \
-  int64x2_t __ret_268; \
-  __ret_268 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_268, __p3_268), __rev0_268, __p1_268); \
-  __ret_268 = __builtin_shufflevector(__ret_268, __ret_268, 1, 0); \
-  __ret_268; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_s16(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \
-  int16x8_t __s0_269 = __p0_269; \
-  int16x4_t __s2_269 = __p2_269; \
-  int16x8_t __ret_269; \
-  __ret_269 = vsetq_lane_s16(vget_lane_s16(__s2_269, __p3_269), __s0_269, __p1_269); \
-  __ret_269; \
-})
-#else
-#define vcopyq_lane_s16(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \
-  int16x8_t __s0_270 = __p0_270; \
-  int16x4_t __s2_270 = __p2_270; \
-  int16x8_t __rev0_270;  __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_270;  __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 3, 2, 1, 0); \
-  int16x8_t __ret_270; \
-  __ret_270 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_270, __p3_270), __rev0_270, __p1_270); \
-  __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_270; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p8(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \
-  poly8x8_t __s0_271 = __p0_271; \
-  poly8x8_t __s2_271 = __p2_271; \
-  poly8x8_t __ret_271; \
-  __ret_271 = vset_lane_p8(vget_lane_p8(__s2_271, __p3_271), __s0_271, __p1_271); \
-  __ret_271; \
-})
-#else
-#define vcopy_lane_p8(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \
-  poly8x8_t __s0_272 = __p0_272; \
-  poly8x8_t __s2_272 = __p2_272; \
-  poly8x8_t __rev0_272;  __rev0_272 = __builtin_shufflevector(__s0_272, __s0_272, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __rev2_272;  __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_272; \
-  __ret_272 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_272, __p3_272), __rev0_272, __p1_272); \
-  __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_272; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_p16(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \
-  poly16x4_t __s0_273 = __p0_273; \
-  poly16x4_t __s2_273 = __p2_273; \
-  poly16x4_t __ret_273; \
-  __ret_273 = vset_lane_p16(vget_lane_p16(__s2_273, __p3_273), __s0_273, __p1_273); \
-  __ret_273; \
-})
-#else
-#define vcopy_lane_p16(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \
-  poly16x4_t __s0_274 = __p0_274; \
-  poly16x4_t __s2_274 = __p2_274; \
-  poly16x4_t __rev0_274;  __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 3, 2, 1, 0); \
-  poly16x4_t __rev2_274;  __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 3, 2, 1, 0); \
-  poly16x4_t __ret_274; \
-  __ret_274 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_274, __p3_274), __rev0_274, __p1_274); \
-  __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 3, 2, 1, 0); \
-  __ret_274; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u8(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \
-  uint8x8_t __s0_275 = __p0_275; \
-  uint8x8_t __s2_275 = __p2_275; \
-  uint8x8_t __ret_275; \
-  __ret_275 = vset_lane_u8(vget_lane_u8(__s2_275, __p3_275), __s0_275, __p1_275); \
-  __ret_275; \
-})
-#else
-#define vcopy_lane_u8(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \
-  uint8x8_t __s0_276 = __p0_276; \
-  uint8x8_t __s2_276 = __p2_276; \
-  uint8x8_t __rev0_276;  __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_276;  __rev2_276 = __builtin_shufflevector(__s2_276, __s2_276, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_276; \
-  __ret_276 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_276, __p3_276), __rev0_276, __p1_276); \
-  __ret_276 = __builtin_shufflevector(__ret_276, __ret_276, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_276; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u32(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \
-  uint32x2_t __s0_277 = __p0_277; \
-  uint32x2_t __s2_277 = __p2_277; \
-  uint32x2_t __ret_277; \
-  __ret_277 = vset_lane_u32(vget_lane_u32(__s2_277, __p3_277), __s0_277, __p1_277); \
-  __ret_277; \
-})
-#else
-#define vcopy_lane_u32(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \
-  uint32x2_t __s0_278 = __p0_278; \
-  uint32x2_t __s2_278 = __p2_278; \
-  uint32x2_t __rev0_278;  __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 1, 0); \
-  uint32x2_t __rev2_278;  __rev2_278 = __builtin_shufflevector(__s2_278, __s2_278, 1, 0); \
-  uint32x2_t __ret_278; \
-  __ret_278 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_278, __p3_278), __rev0_278, __p1_278); \
-  __ret_278 = __builtin_shufflevector(__ret_278, __ret_278, 1, 0); \
-  __ret_278; \
-})
-#endif
-
-#define vcopy_lane_u64(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \
-  uint64x1_t __s0_279 = __p0_279; \
-  uint64x1_t __s2_279 = __p2_279; \
-  uint64x1_t __ret_279; \
-  __ret_279 = vset_lane_u64(vget_lane_u64(__s2_279, __p3_279), __s0_279, __p1_279); \
-  __ret_279; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_u16(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \
-  uint16x4_t __s0_280 = __p0_280; \
-  uint16x4_t __s2_280 = __p2_280; \
-  uint16x4_t __ret_280; \
-  __ret_280 = vset_lane_u16(vget_lane_u16(__s2_280, __p3_280), __s0_280, __p1_280); \
-  __ret_280; \
-})
-#else
-#define vcopy_lane_u16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \
-  uint16x4_t __s0_281 = __p0_281; \
-  uint16x4_t __s2_281 = __p2_281; \
-  uint16x4_t __rev0_281;  __rev0_281 = __builtin_shufflevector(__s0_281, __s0_281, 3, 2, 1, 0); \
-  uint16x4_t __rev2_281;  __rev2_281 = __builtin_shufflevector(__s2_281, __s2_281, 3, 2, 1, 0); \
-  uint16x4_t __ret_281; \
-  __ret_281 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_281, __p3_281), __rev0_281, __p1_281); \
-  __ret_281 = __builtin_shufflevector(__ret_281, __ret_281, 3, 2, 1, 0); \
-  __ret_281; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s8(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \
-  int8x8_t __s0_282 = __p0_282; \
-  int8x8_t __s2_282 = __p2_282; \
-  int8x8_t __ret_282; \
-  __ret_282 = vset_lane_s8(vget_lane_s8(__s2_282, __p3_282), __s0_282, __p1_282); \
-  __ret_282; \
-})
-#else
-#define vcopy_lane_s8(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \
-  int8x8_t __s0_283 = __p0_283; \
-  int8x8_t __s2_283 = __p2_283; \
-  int8x8_t __rev0_283;  __rev0_283 = __builtin_shufflevector(__s0_283, __s0_283, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __rev2_283;  __rev2_283 = __builtin_shufflevector(__s2_283, __s2_283, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_283; \
-  __ret_283 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_283, __p3_283), __rev0_283, __p1_283); \
-  __ret_283 = __builtin_shufflevector(__ret_283, __ret_283, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_283; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_f32(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \
-  float32x2_t __s0_284 = __p0_284; \
-  float32x2_t __s2_284 = __p2_284; \
-  float32x2_t __ret_284; \
-  __ret_284 = vset_lane_f32(vget_lane_f32(__s2_284, __p3_284), __s0_284, __p1_284); \
-  __ret_284; \
-})
-#else
-#define vcopy_lane_f32(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \
-  float32x2_t __s0_285 = __p0_285; \
-  float32x2_t __s2_285 = __p2_285; \
-  float32x2_t __rev0_285;  __rev0_285 = __builtin_shufflevector(__s0_285, __s0_285, 1, 0); \
-  float32x2_t __rev2_285;  __rev2_285 = __builtin_shufflevector(__s2_285, __s2_285, 1, 0); \
-  float32x2_t __ret_285; \
-  __ret_285 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_285, __p3_285), __rev0_285, __p1_285); \
-  __ret_285 = __builtin_shufflevector(__ret_285, __ret_285, 1, 0); \
-  __ret_285; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s32(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \
-  int32x2_t __s0_286 = __p0_286; \
-  int32x2_t __s2_286 = __p2_286; \
-  int32x2_t __ret_286; \
-  __ret_286 = vset_lane_s32(vget_lane_s32(__s2_286, __p3_286), __s0_286, __p1_286); \
-  __ret_286; \
-})
-#else
-#define vcopy_lane_s32(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \
-  int32x2_t __s0_287 = __p0_287; \
-  int32x2_t __s2_287 = __p2_287; \
-  int32x2_t __rev0_287;  __rev0_287 = __builtin_shufflevector(__s0_287, __s0_287, 1, 0); \
-  int32x2_t __rev2_287;  __rev2_287 = __builtin_shufflevector(__s2_287, __s2_287, 1, 0); \
-  int32x2_t __ret_287; \
-  __ret_287 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_287, __p3_287), __rev0_287, __p1_287); \
-  __ret_287 = __builtin_shufflevector(__ret_287, __ret_287, 1, 0); \
-  __ret_287; \
-})
-#endif
-
-#define vcopy_lane_s64(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \
-  int64x1_t __s0_288 = __p0_288; \
-  int64x1_t __s2_288 = __p2_288; \
-  int64x1_t __ret_288; \
-  __ret_288 = vset_lane_s64(vget_lane_s64(__s2_288, __p3_288), __s0_288, __p1_288); \
-  __ret_288; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_lane_s16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \
-  int16x4_t __s0_289 = __p0_289; \
-  int16x4_t __s2_289 = __p2_289; \
-  int16x4_t __ret_289; \
-  __ret_289 = vset_lane_s16(vget_lane_s16(__s2_289, __p3_289), __s0_289, __p1_289); \
-  __ret_289; \
-})
-#else
-#define vcopy_lane_s16(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \
-  int16x4_t __s0_290 = __p0_290; \
-  int16x4_t __s2_290 = __p2_290; \
-  int16x4_t __rev0_290;  __rev0_290 = __builtin_shufflevector(__s0_290, __s0_290, 3, 2, 1, 0); \
-  int16x4_t __rev2_290;  __rev2_290 = __builtin_shufflevector(__s2_290, __s2_290, 3, 2, 1, 0); \
-  int16x4_t __ret_290; \
-  __ret_290 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_290, __p3_290), __rev0_290, __p1_290); \
-  __ret_290 = __builtin_shufflevector(__ret_290, __ret_290, 3, 2, 1, 0); \
-  __ret_290; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p8(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \
-  poly8x16_t __s0_291 = __p0_291; \
-  poly8x16_t __s2_291 = __p2_291; \
-  poly8x16_t __ret_291; \
-  __ret_291 = vsetq_lane_p8(vgetq_lane_p8(__s2_291, __p3_291), __s0_291, __p1_291); \
-  __ret_291; \
-})
-#else
-#define vcopyq_laneq_p8(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \
-  poly8x16_t __s0_292 = __p0_292; \
-  poly8x16_t __s2_292 = __p2_292; \
-  poly8x16_t __rev0_292;  __rev0_292 = __builtin_shufflevector(__s0_292, __s0_292, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_292;  __rev2_292 = __builtin_shufflevector(__s2_292, __s2_292, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_292; \
-  __ret_292 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_292, __p3_292), __rev0_292, __p1_292); \
-  __ret_292 = __builtin_shufflevector(__ret_292, __ret_292, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_292; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p16(__p0_293, __p1_293, __p2_293, __p3_293) __extension__ ({ \
-  poly16x8_t __s0_293 = __p0_293; \
-  poly16x8_t __s2_293 = __p2_293; \
-  poly16x8_t __ret_293; \
-  __ret_293 = vsetq_lane_p16(vgetq_lane_p16(__s2_293, __p3_293), __s0_293, __p1_293); \
-  __ret_293; \
-})
-#else
-#define vcopyq_laneq_p16(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \
-  poly16x8_t __s0_294 = __p0_294; \
-  poly16x8_t __s2_294 = __p2_294; \
-  poly16x8_t __rev0_294;  __rev0_294 = __builtin_shufflevector(__s0_294, __s0_294, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __rev2_294;  __rev2_294 = __builtin_shufflevector(__s2_294, __s2_294, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_294; \
-  __ret_294 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_294, __p3_294), __rev0_294, __p1_294); \
-  __ret_294 = __builtin_shufflevector(__ret_294, __ret_294, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_294; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u8(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \
-  uint8x16_t __s0_295 = __p0_295; \
-  uint8x16_t __s2_295 = __p2_295; \
-  uint8x16_t __ret_295; \
-  __ret_295 = vsetq_lane_u8(vgetq_lane_u8(__s2_295, __p3_295), __s0_295, __p1_295); \
-  __ret_295; \
-})
-#else
-#define vcopyq_laneq_u8(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \
-  uint8x16_t __s0_296 = __p0_296; \
-  uint8x16_t __s2_296 = __p2_296; \
-  uint8x16_t __rev0_296;  __rev0_296 = __builtin_shufflevector(__s0_296, __s0_296, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_296;  __rev2_296 = __builtin_shufflevector(__s2_296, __s2_296, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_296; \
-  __ret_296 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_296, __p3_296), __rev0_296, __p1_296); \
-  __ret_296 = __builtin_shufflevector(__ret_296, __ret_296, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_296; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u32(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \
-  uint32x4_t __s0_297 = __p0_297; \
-  uint32x4_t __s2_297 = __p2_297; \
-  uint32x4_t __ret_297; \
-  __ret_297 = vsetq_lane_u32(vgetq_lane_u32(__s2_297, __p3_297), __s0_297, __p1_297); \
-  __ret_297; \
-})
-#else
-#define vcopyq_laneq_u32(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \
-  uint32x4_t __s0_298 = __p0_298; \
-  uint32x4_t __s2_298 = __p2_298; \
-  uint32x4_t __rev0_298;  __rev0_298 = __builtin_shufflevector(__s0_298, __s0_298, 3, 2, 1, 0); \
-  uint32x4_t __rev2_298;  __rev2_298 = __builtin_shufflevector(__s2_298, __s2_298, 3, 2, 1, 0); \
-  uint32x4_t __ret_298; \
-  __ret_298 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_298, __p3_298), __rev0_298, __p1_298); \
-  __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 3, 2, 1, 0); \
-  __ret_298; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u64(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \
-  uint64x2_t __s0_299 = __p0_299; \
-  uint64x2_t __s2_299 = __p2_299; \
-  uint64x2_t __ret_299; \
-  __ret_299 = vsetq_lane_u64(vgetq_lane_u64(__s2_299, __p3_299), __s0_299, __p1_299); \
-  __ret_299; \
-})
-#else
-#define vcopyq_laneq_u64(__p0_300, __p1_300, __p2_300, __p3_300) __extension__ ({ \
-  uint64x2_t __s0_300 = __p0_300; \
-  uint64x2_t __s2_300 = __p2_300; \
-  uint64x2_t __rev0_300;  __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 1, 0); \
-  uint64x2_t __rev2_300;  __rev2_300 = __builtin_shufflevector(__s2_300, __s2_300, 1, 0); \
-  uint64x2_t __ret_300; \
-  __ret_300 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_300, __p3_300), __rev0_300, __p1_300); \
-  __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 1, 0); \
-  __ret_300; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_u16(__p0_301, __p1_301, __p2_301, __p3_301) __extension__ ({ \
-  uint16x8_t __s0_301 = __p0_301; \
-  uint16x8_t __s2_301 = __p2_301; \
-  uint16x8_t __ret_301; \
-  __ret_301 = vsetq_lane_u16(vgetq_lane_u16(__s2_301, __p3_301), __s0_301, __p1_301); \
-  __ret_301; \
-})
-#else
-#define vcopyq_laneq_u16(__p0_302, __p1_302, __p2_302, __p3_302) __extension__ ({ \
-  uint16x8_t __s0_302 = __p0_302; \
-  uint16x8_t __s2_302 = __p2_302; \
-  uint16x8_t __rev0_302;  __rev0_302 = __builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_302;  __rev2_302 = __builtin_shufflevector(__s2_302, __s2_302, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_302; \
-  __ret_302 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_302, __p3_302), __rev0_302, __p1_302); \
-  __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_302; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s8(__p0_303, __p1_303, __p2_303, __p3_303) __extension__ ({ \
-  int8x16_t __s0_303 = __p0_303; \
-  int8x16_t __s2_303 = __p2_303; \
-  int8x16_t __ret_303; \
-  __ret_303 = vsetq_lane_s8(vgetq_lane_s8(__s2_303, __p3_303), __s0_303, __p1_303); \
-  __ret_303; \
-})
-#else
-#define vcopyq_laneq_s8(__p0_304, __p1_304, __p2_304, __p3_304) __extension__ ({ \
-  int8x16_t __s0_304 = __p0_304; \
-  int8x16_t __s2_304 = __p2_304; \
-  int8x16_t __rev0_304;  __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_304;  __rev2_304 = __builtin_shufflevector(__s2_304, __s2_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_304; \
-  __ret_304 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_304, __p3_304), __rev0_304, __p1_304); \
-  __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_304; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \
-  float32x4_t __s0_305 = __p0_305; \
-  float32x4_t __s2_305 = __p2_305; \
-  float32x4_t __ret_305; \
-  __ret_305 = vsetq_lane_f32(vgetq_lane_f32(__s2_305, __p3_305), __s0_305, __p1_305); \
-  __ret_305; \
-})
-#else
-#define vcopyq_laneq_f32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \
-  float32x4_t __s0_306 = __p0_306; \
-  float32x4_t __s2_306 = __p2_306; \
-  float32x4_t __rev0_306;  __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \
-  float32x4_t __rev2_306;  __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 3, 2, 1, 0); \
-  float32x4_t __ret_306; \
-  __ret_306 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_306, __p3_306), __rev0_306, __p1_306); \
-  __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \
-  __ret_306; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \
-  int32x4_t __s0_307 = __p0_307; \
-  int32x4_t __s2_307 = __p2_307; \
-  int32x4_t __ret_307; \
-  __ret_307 = vsetq_lane_s32(vgetq_lane_s32(__s2_307, __p3_307), __s0_307, __p1_307); \
-  __ret_307; \
-})
-#else
-#define vcopyq_laneq_s32(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \
-  int32x4_t __s0_308 = __p0_308; \
-  int32x4_t __s2_308 = __p2_308; \
-  int32x4_t __rev0_308;  __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 3, 2, 1, 0); \
-  int32x4_t __rev2_308;  __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 3, 2, 1, 0); \
-  int32x4_t __ret_308; \
-  __ret_308 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_308, __p3_308), __rev0_308, __p1_308); \
-  __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 3, 2, 1, 0); \
-  __ret_308; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s64(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \
-  int64x2_t __s0_309 = __p0_309; \
-  int64x2_t __s2_309 = __p2_309; \
-  int64x2_t __ret_309; \
-  __ret_309 = vsetq_lane_s64(vgetq_lane_s64(__s2_309, __p3_309), __s0_309, __p1_309); \
-  __ret_309; \
-})
-#else
-#define vcopyq_laneq_s64(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \
-  int64x2_t __s0_310 = __p0_310; \
-  int64x2_t __s2_310 = __p2_310; \
-  int64x2_t __rev0_310;  __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 1, 0); \
-  int64x2_t __rev2_310;  __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 1, 0); \
-  int64x2_t __ret_310; \
-  __ret_310 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_310, __p3_310), __rev0_310, __p1_310); \
-  __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 1, 0); \
-  __ret_310; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_s16(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \
-  int16x8_t __s0_311 = __p0_311; \
-  int16x8_t __s2_311 = __p2_311; \
-  int16x8_t __ret_311; \
-  __ret_311 = vsetq_lane_s16(vgetq_lane_s16(__s2_311, __p3_311), __s0_311, __p1_311); \
-  __ret_311; \
-})
-#else
-#define vcopyq_laneq_s16(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \
-  int16x8_t __s0_312 = __p0_312; \
-  int16x8_t __s2_312 = __p2_312; \
-  int16x8_t __rev0_312;  __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_312;  __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_312; \
-  __ret_312 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_312, __p3_312), __rev0_312, __p1_312); \
-  __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_312; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p8(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \
-  poly8x8_t __s0_313 = __p0_313; \
-  poly8x16_t __s2_313 = __p2_313; \
-  poly8x8_t __ret_313; \
-  __ret_313 = vset_lane_p8(vgetq_lane_p8(__s2_313, __p3_313), __s0_313, __p1_313); \
-  __ret_313; \
-})
-#else
-#define vcopy_laneq_p8(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \
-  poly8x8_t __s0_314 = __p0_314; \
-  poly8x16_t __s2_314 = __p2_314; \
-  poly8x8_t __rev0_314;  __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __rev2_314;  __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_314; \
-  __ret_314 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_314, __p3_314), __rev0_314, __p1_314); \
-  __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_314; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p16(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \
-  poly16x4_t __s0_315 = __p0_315; \
-  poly16x8_t __s2_315 = __p2_315; \
-  poly16x4_t __ret_315; \
-  __ret_315 = vset_lane_p16(vgetq_lane_p16(__s2_315, __p3_315), __s0_315, __p1_315); \
-  __ret_315; \
-})
-#else
-#define vcopy_laneq_p16(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \
-  poly16x4_t __s0_316 = __p0_316; \
-  poly16x8_t __s2_316 = __p2_316; \
-  poly16x4_t __rev0_316;  __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \
-  poly16x8_t __rev2_316;  __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_316; \
-  __ret_316 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_316, __p3_316), __rev0_316, __p1_316); \
-  __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \
-  __ret_316; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u8(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \
-  uint8x8_t __s0_317 = __p0_317; \
-  uint8x16_t __s2_317 = __p2_317; \
-  uint8x8_t __ret_317; \
-  __ret_317 = vset_lane_u8(vgetq_lane_u8(__s2_317, __p3_317), __s0_317, __p1_317); \
-  __ret_317; \
-})
-#else
-#define vcopy_laneq_u8(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \
-  uint8x8_t __s0_318 = __p0_318; \
-  uint8x16_t __s2_318 = __p2_318; \
-  uint8x8_t __rev0_318;  __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_318;  __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_318; \
-  __ret_318 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_318, __p3_318), __rev0_318, __p1_318); \
-  __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_318; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u32(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \
-  uint32x2_t __s0_319 = __p0_319; \
-  uint32x4_t __s2_319 = __p2_319; \
-  uint32x2_t __ret_319; \
-  __ret_319 = vset_lane_u32(vgetq_lane_u32(__s2_319, __p3_319), __s0_319, __p1_319); \
-  __ret_319; \
-})
-#else
-#define vcopy_laneq_u32(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \
-  uint32x2_t __s0_320 = __p0_320; \
-  uint32x4_t __s2_320 = __p2_320; \
-  uint32x2_t __rev0_320;  __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 1, 0); \
-  uint32x4_t __rev2_320;  __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 3, 2, 1, 0); \
-  uint32x2_t __ret_320; \
-  __ret_320 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_320, __p3_320), __rev0_320, __p1_320); \
-  __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 1, 0); \
-  __ret_320; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u64(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \
-  uint64x1_t __s0_321 = __p0_321; \
-  uint64x2_t __s2_321 = __p2_321; \
-  uint64x1_t __ret_321; \
-  __ret_321 = vset_lane_u64(vgetq_lane_u64(__s2_321, __p3_321), __s0_321, __p1_321); \
-  __ret_321; \
-})
-#else
-#define vcopy_laneq_u64(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \
-  uint64x1_t __s0_322 = __p0_322; \
-  uint64x2_t __s2_322 = __p2_322; \
-  uint64x2_t __rev2_322;  __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 1, 0); \
-  uint64x1_t __ret_322; \
-  __ret_322 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_322, __p3_322), __s0_322, __p1_322); \
-  __ret_322; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_u16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \
-  uint16x4_t __s0_323 = __p0_323; \
-  uint16x8_t __s2_323 = __p2_323; \
-  uint16x4_t __ret_323; \
-  __ret_323 = vset_lane_u16(vgetq_lane_u16(__s2_323, __p3_323), __s0_323, __p1_323); \
-  __ret_323; \
-})
-#else
-#define vcopy_laneq_u16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \
-  uint16x4_t __s0_324 = __p0_324; \
-  uint16x8_t __s2_324 = __p2_324; \
-  uint16x4_t __rev0_324;  __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 1, 0); \
-  uint16x8_t __rev2_324;  __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_324; \
-  __ret_324 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_324, __p3_324), __rev0_324, __p1_324); \
-  __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \
-  __ret_324; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s8(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \
-  int8x8_t __s0_325 = __p0_325; \
-  int8x16_t __s2_325 = __p2_325; \
-  int8x8_t __ret_325; \
-  __ret_325 = vset_lane_s8(vgetq_lane_s8(__s2_325, __p3_325), __s0_325, __p1_325); \
-  __ret_325; \
-})
-#else
-#define vcopy_laneq_s8(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \
-  int8x8_t __s0_326 = __p0_326; \
-  int8x16_t __s2_326 = __p2_326; \
-  int8x8_t __rev0_326;  __rev0_326 = __builtin_shufflevector(__s0_326, __s0_326, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_326;  __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_326; \
-  __ret_326 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_326, __p3_326), __rev0_326, __p1_326); \
-  __ret_326 = __builtin_shufflevector(__ret_326, __ret_326, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_326; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f32(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \
-  float32x2_t __s0_327 = __p0_327; \
-  float32x4_t __s2_327 = __p2_327; \
-  float32x2_t __ret_327; \
-  __ret_327 = vset_lane_f32(vgetq_lane_f32(__s2_327, __p3_327), __s0_327, __p1_327); \
-  __ret_327; \
-})
-#else
-#define vcopy_laneq_f32(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \
-  float32x2_t __s0_328 = __p0_328; \
-  float32x4_t __s2_328 = __p2_328; \
-  float32x2_t __rev0_328;  __rev0_328 = __builtin_shufflevector(__s0_328, __s0_328, 1, 0); \
-  float32x4_t __rev2_328;  __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 3, 2, 1, 0); \
-  float32x2_t __ret_328; \
-  __ret_328 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_328, __p3_328), __rev0_328, __p1_328); \
-  __ret_328 = __builtin_shufflevector(__ret_328, __ret_328, 1, 0); \
-  __ret_328; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s32(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \
-  int32x2_t __s0_329 = __p0_329; \
-  int32x4_t __s2_329 = __p2_329; \
-  int32x2_t __ret_329; \
-  __ret_329 = vset_lane_s32(vgetq_lane_s32(__s2_329, __p3_329), __s0_329, __p1_329); \
-  __ret_329; \
-})
-#else
-#define vcopy_laneq_s32(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \
-  int32x2_t __s0_330 = __p0_330; \
-  int32x4_t __s2_330 = __p2_330; \
-  int32x2_t __rev0_330;  __rev0_330 = __builtin_shufflevector(__s0_330, __s0_330, 1, 0); \
-  int32x4_t __rev2_330;  __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 3, 2, 1, 0); \
-  int32x2_t __ret_330; \
-  __ret_330 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_330, __p3_330), __rev0_330, __p1_330); \
-  __ret_330 = __builtin_shufflevector(__ret_330, __ret_330, 1, 0); \
-  __ret_330; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s64(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \
-  int64x1_t __s0_331 = __p0_331; \
-  int64x2_t __s2_331 = __p2_331; \
-  int64x1_t __ret_331; \
-  __ret_331 = vset_lane_s64(vgetq_lane_s64(__s2_331, __p3_331), __s0_331, __p1_331); \
-  __ret_331; \
-})
-#else
-#define vcopy_laneq_s64(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \
-  int64x1_t __s0_332 = __p0_332; \
-  int64x2_t __s2_332 = __p2_332; \
-  int64x2_t __rev2_332;  __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 1, 0); \
-  int64x1_t __ret_332; \
-  __ret_332 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_332, __p3_332), __s0_332, __p1_332); \
-  __ret_332; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_s16(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \
-  int16x4_t __s0_333 = __p0_333; \
-  int16x8_t __s2_333 = __p2_333; \
-  int16x4_t __ret_333; \
-  __ret_333 = vset_lane_s16(vgetq_lane_s16(__s2_333, __p3_333), __s0_333, __p1_333); \
-  __ret_333; \
-})
-#else
-#define vcopy_laneq_s16(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \
-  int16x4_t __s0_334 = __p0_334; \
-  int16x8_t __s2_334 = __p2_334; \
-  int16x4_t __rev0_334;  __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 3, 2, 1, 0); \
-  int16x8_t __rev2_334;  __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_334; \
-  __ret_334 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_334, __p3_334), __rev0_334, __p1_334); \
-  __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 3, 2, 1, 0); \
-  __ret_334; \
-})
-#endif
-
-#define vcreate_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (poly64x1_t)(__promote); \
-  __ret; \
-})
-#define vcreate_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  uint64_t __promote = __p0; \
-  __ret = (float64x1_t)(__promote); \
-  __ret; \
-})
-__ai float32_t vcvts_f32_s32(int32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0);
-  return __ret;
-}
-__ai float32_t vcvts_f32_u32(uint32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9);
-  return __ret;
-}
-#endif
-
-__ai float64_t vcvtd_f64_s64(int64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0);
-  return __ret;
-}
-__ai float64_t vcvtd_f64_u64(uint64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
-  float16x8_t __ret;
-  __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1));
-  return __ret;
-}
-#else
-__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) {
-  float16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float16x8_t __ret;
-  __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = vcvt_f32_f16(vget_high_f16(__p0));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) {
-  float16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x4_t __ret;
-  __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
-  float64x2_t __ret;
-  __ret = vcvt_f64_f32(vget_high_f32(__p0));
-  return __ret;
-}
-#else
-__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \
-  __ret; \
-})
-#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \
-  __ret; \
-})
-#else
-#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64x2_t __ret; \
-  __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  int64x1_t __ret; \
-  __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \
-  __ret; \
-})
-#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \
-  __ret; \
-})
-#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \
-  __ret; \
-})
-#else
-#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64x2_t __ret; \
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  uint64x1_t __ret; \
-  __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \
-  __ret; \
-})
-#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \
-  __ret; \
-})
-__ai int32_t vcvts_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai uint32_t vcvts_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19);
-  return __ret;
-}
-__ai int32_t vcvtas_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtad_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtas_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtad_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtms_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtmd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtms_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtmd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtns_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtnd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtns_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtnd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0);
-  return __ret;
-}
-__ai int32_t vcvtps_s32_f32(float32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0);
-  return __ret;
-}
-__ai int64_t vcvtpd_s64_f64(float64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0);
-  return __ret;
-}
-__ai uint32_t vcvtps_u32_f32(float32_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vcvtpd_u64_f64(float64_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0);
-  return __ret;
-}
-__ai float32_t vcvtxd_f32_f64(float64_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x4_t __ret;
-  __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __p0 / __p1;
-  return __ret;
-}
-#else
-__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __rev0 / __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \
-  poly8x8_t __s0 = __p0; \
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_p16(__p0, __p1) __extension__ ({ \
-  poly16x4_t __s0 = __p0; \
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \
-  uint8x8_t __s0 = __p0; \
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_u32(__p0, __p1) __extension__ ({ \
-  uint32x2_t __s0 = __p0; \
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \
-  uint64x1_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_u16(__p0, __p1) __extension__ ({ \
-  uint16x4_t __s0 = __p0; \
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \
-  int8x8_t __s0 = __p0; \
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_f32(__p0, __p1) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_lane_s32(__p0, __p1) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \
-  int64x1_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_lane_s16(__p0, __p1) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vdup_lane_p64(__p0_335, __p1_335) __extension__ ({ \
-  poly64x1_t __s0_335 = __p0_335; \
-  poly64x1_t __ret_335; \
-  __ret_335 = splat_lane_p64(__s0_335, __p1_335); \
-  __ret_335; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_p64(__p0_336, __p1_336) __extension__ ({ \
-  poly64x1_t __s0_336 = __p0_336; \
-  poly64x2_t __ret_336; \
-  __ret_336 = splatq_lane_p64(__s0_336, __p1_336); \
-  __ret_336; \
-})
-#else
-#define vdupq_lane_p64(__p0_337, __p1_337) __extension__ ({ \
-  poly64x1_t __s0_337 = __p0_337; \
-  poly64x2_t __ret_337; \
-  __ret_337 = __noswap_splatq_lane_p64(__s0_337, __p1_337); \
-  __ret_337 = __builtin_shufflevector(__ret_337, __ret_337, 1, 0); \
-  __ret_337; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f64(__p0_338, __p1_338) __extension__ ({ \
-  float64x1_t __s0_338 = __p0_338; \
-  float64x2_t __ret_338; \
-  __ret_338 = splatq_lane_f64(__s0_338, __p1_338); \
-  __ret_338; \
-})
-#else
-#define vdupq_lane_f64(__p0_339, __p1_339) __extension__ ({ \
-  float64x1_t __s0_339 = __p0_339; \
-  float64x2_t __ret_339; \
-  __ret_339 = __noswap_splatq_lane_f64(__s0_339, __p1_339); \
-  __ret_339 = __builtin_shufflevector(__ret_339, __ret_339, 1, 0); \
-  __ret_339; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_lane_f16(__p0_340, __p1_340) __extension__ ({ \
-  float16x4_t __s0_340 = __p0_340; \
-  float16x8_t __ret_340; \
-  __ret_340 = splatq_lane_f16(__s0_340, __p1_340); \
-  __ret_340; \
-})
-#else
-#define vdupq_lane_f16(__p0_341, __p1_341) __extension__ ({ \
-  float16x4_t __s0_341 = __p0_341; \
-  float16x4_t __rev0_341;  __rev0_341 = __builtin_shufflevector(__s0_341, __s0_341, 3, 2, 1, 0); \
-  float16x8_t __ret_341; \
-  __ret_341 = __noswap_splatq_lane_f16(__rev0_341, __p1_341); \
-  __ret_341 = __builtin_shufflevector(__ret_341, __ret_341, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_341; \
-})
-#endif
-
-#define vdup_lane_f64(__p0_342, __p1_342) __extension__ ({ \
-  float64x1_t __s0_342 = __p0_342; \
-  float64x1_t __ret_342; \
-  __ret_342 = splat_lane_f64(__s0_342, __p1_342); \
-  __ret_342; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vdup_lane_f16(__p0_343, __p1_343) __extension__ ({ \
-  float16x4_t __s0_343 = __p0_343; \
-  float16x4_t __ret_343; \
-  __ret_343 = splat_lane_f16(__s0_343, __p1_343); \
-  __ret_343; \
-})
-#else
-#define vdup_lane_f16(__p0_344, __p1_344) __extension__ ({ \
-  float16x4_t __s0_344 = __p0_344; \
-  float16x4_t __rev0_344;  __rev0_344 = __builtin_shufflevector(__s0_344, __s0_344, 3, 2, 1, 0); \
-  float16x4_t __ret_344; \
-  __ret_344 = __noswap_splat_lane_f16(__rev0_344, __p1_344); \
-  __ret_344 = __builtin_shufflevector(__ret_344, __ret_344, 3, 2, 1, 0); \
-  __ret_344; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \
-  poly8x16_t __s0 = __p0; \
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8_t __ret; \
-  __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \
-  poly16x8_t __s0 = __p0; \
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16_t __ret; \
-  __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \
-  uint8x16_t __s0 = __p0; \
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \
-  uint32x4_t __s0 = __p0; \
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \
-  uint64x2_t __s0 = __p0; \
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \
-  uint16x8_t __s0 = __p0; \
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \
-  int8x16_t __s0 = __p0; \
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \
-  int64x2_t __s0 = __p0; \
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p8(__p0_345, __p1_345) __extension__ ({ \
-  poly8x16_t __s0_345 = __p0_345; \
-  poly8x8_t __ret_345; \
-  __ret_345 = splat_laneq_p8(__s0_345, __p1_345); \
-  __ret_345; \
-})
-#else
-#define vdup_laneq_p8(__p0_346, __p1_346) __extension__ ({ \
-  poly8x16_t __s0_346 = __p0_346; \
-  poly8x16_t __rev0_346;  __rev0_346 = __builtin_shufflevector(__s0_346, __s0_346, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x8_t __ret_346; \
-  __ret_346 = __noswap_splat_laneq_p8(__rev0_346, __p1_346); \
-  __ret_346 = __builtin_shufflevector(__ret_346, __ret_346, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_346; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p64(__p0_347, __p1_347) __extension__ ({ \
-  poly64x2_t __s0_347 = __p0_347; \
-  poly64x1_t __ret_347; \
-  __ret_347 = splat_laneq_p64(__s0_347, __p1_347); \
-  __ret_347; \
-})
-#else
-#define vdup_laneq_p64(__p0_348, __p1_348) __extension__ ({ \
-  poly64x2_t __s0_348 = __p0_348; \
-  poly64x2_t __rev0_348;  __rev0_348 = __builtin_shufflevector(__s0_348, __s0_348, 1, 0); \
-  poly64x1_t __ret_348; \
-  __ret_348 = __noswap_splat_laneq_p64(__rev0_348, __p1_348); \
-  __ret_348; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_p16(__p0_349, __p1_349) __extension__ ({ \
-  poly16x8_t __s0_349 = __p0_349; \
-  poly16x4_t __ret_349; \
-  __ret_349 = splat_laneq_p16(__s0_349, __p1_349); \
-  __ret_349; \
-})
-#else
-#define vdup_laneq_p16(__p0_350, __p1_350) __extension__ ({ \
-  poly16x8_t __s0_350 = __p0_350; \
-  poly16x8_t __rev0_350;  __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x4_t __ret_350; \
-  __ret_350 = __noswap_splat_laneq_p16(__rev0_350, __p1_350); \
-  __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 3, 2, 1, 0); \
-  __ret_350; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p8(__p0_351, __p1_351) __extension__ ({ \
-  poly8x16_t __s0_351 = __p0_351; \
-  poly8x16_t __ret_351; \
-  __ret_351 = splatq_laneq_p8(__s0_351, __p1_351); \
-  __ret_351; \
-})
-#else
-#define vdupq_laneq_p8(__p0_352, __p1_352) __extension__ ({ \
-  poly8x16_t __s0_352 = __p0_352; \
-  poly8x16_t __rev0_352;  __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16_t __ret_352; \
-  __ret_352 = __noswap_splatq_laneq_p8(__rev0_352, __p1_352); \
-  __ret_352 = __builtin_shufflevector(__ret_352, __ret_352, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_352; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p64(__p0_353, __p1_353) __extension__ ({ \
-  poly64x2_t __s0_353 = __p0_353; \
-  poly64x2_t __ret_353; \
-  __ret_353 = splatq_laneq_p64(__s0_353, __p1_353); \
-  __ret_353; \
-})
-#else
-#define vdupq_laneq_p64(__p0_354, __p1_354) __extension__ ({ \
-  poly64x2_t __s0_354 = __p0_354; \
-  poly64x2_t __rev0_354;  __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 1, 0); \
-  poly64x2_t __ret_354; \
-  __ret_354 = __noswap_splatq_laneq_p64(__rev0_354, __p1_354); \
-  __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 1, 0); \
-  __ret_354; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_p16(__p0_355, __p1_355) __extension__ ({ \
-  poly16x8_t __s0_355 = __p0_355; \
-  poly16x8_t __ret_355; \
-  __ret_355 = splatq_laneq_p16(__s0_355, __p1_355); \
-  __ret_355; \
-})
-#else
-#define vdupq_laneq_p16(__p0_356, __p1_356) __extension__ ({ \
-  poly16x8_t __s0_356 = __p0_356; \
-  poly16x8_t __rev0_356;  __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly16x8_t __ret_356; \
-  __ret_356 = __noswap_splatq_laneq_p16(__rev0_356, __p1_356); \
-  __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_356; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u8(__p0_357, __p1_357) __extension__ ({ \
-  uint8x16_t __s0_357 = __p0_357; \
-  uint8x16_t __ret_357; \
-  __ret_357 = splatq_laneq_u8(__s0_357, __p1_357); \
-  __ret_357; \
-})
-#else
-#define vdupq_laneq_u8(__p0_358, __p1_358) __extension__ ({ \
-  uint8x16_t __s0_358 = __p0_358; \
-  uint8x16_t __rev0_358;  __rev0_358 = __builtin_shufflevector(__s0_358, __s0_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_358; \
-  __ret_358 = __noswap_splatq_laneq_u8(__rev0_358, __p1_358); \
-  __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_358; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u32(__p0_359, __p1_359) __extension__ ({ \
-  uint32x4_t __s0_359 = __p0_359; \
-  uint32x4_t __ret_359; \
-  __ret_359 = splatq_laneq_u32(__s0_359, __p1_359); \
-  __ret_359; \
-})
-#else
-#define vdupq_laneq_u32(__p0_360, __p1_360) __extension__ ({ \
-  uint32x4_t __s0_360 = __p0_360; \
-  uint32x4_t __rev0_360;  __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 3, 2, 1, 0); \
-  uint32x4_t __ret_360; \
-  __ret_360 = __noswap_splatq_laneq_u32(__rev0_360, __p1_360); \
-  __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 3, 2, 1, 0); \
-  __ret_360; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u64(__p0_361, __p1_361) __extension__ ({ \
-  uint64x2_t __s0_361 = __p0_361; \
-  uint64x2_t __ret_361; \
-  __ret_361 = splatq_laneq_u64(__s0_361, __p1_361); \
-  __ret_361; \
-})
-#else
-#define vdupq_laneq_u64(__p0_362, __p1_362) __extension__ ({ \
-  uint64x2_t __s0_362 = __p0_362; \
-  uint64x2_t __rev0_362;  __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 1, 0); \
-  uint64x2_t __ret_362; \
-  __ret_362 = __noswap_splatq_laneq_u64(__rev0_362, __p1_362); \
-  __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 1, 0); \
-  __ret_362; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_u16(__p0_363, __p1_363) __extension__ ({ \
-  uint16x8_t __s0_363 = __p0_363; \
-  uint16x8_t __ret_363; \
-  __ret_363 = splatq_laneq_u16(__s0_363, __p1_363); \
-  __ret_363; \
-})
-#else
-#define vdupq_laneq_u16(__p0_364, __p1_364) __extension__ ({ \
-  uint16x8_t __s0_364 = __p0_364; \
-  uint16x8_t __rev0_364;  __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_364; \
-  __ret_364 = __noswap_splatq_laneq_u16(__rev0_364, __p1_364); \
-  __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_364; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s8(__p0_365, __p1_365) __extension__ ({ \
-  int8x16_t __s0_365 = __p0_365; \
-  int8x16_t __ret_365; \
-  __ret_365 = splatq_laneq_s8(__s0_365, __p1_365); \
-  __ret_365; \
-})
-#else
-#define vdupq_laneq_s8(__p0_366, __p1_366) __extension__ ({ \
-  int8x16_t __s0_366 = __p0_366; \
-  int8x16_t __rev0_366;  __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_366; \
-  __ret_366 = __noswap_splatq_laneq_s8(__rev0_366, __p1_366); \
-  __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_366; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f64(__p0_367, __p1_367) __extension__ ({ \
-  float64x2_t __s0_367 = __p0_367; \
-  float64x2_t __ret_367; \
-  __ret_367 = splatq_laneq_f64(__s0_367, __p1_367); \
-  __ret_367; \
-})
-#else
-#define vdupq_laneq_f64(__p0_368, __p1_368) __extension__ ({ \
-  float64x2_t __s0_368 = __p0_368; \
-  float64x2_t __rev0_368;  __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 1, 0); \
-  float64x2_t __ret_368; \
-  __ret_368 = __noswap_splatq_laneq_f64(__rev0_368, __p1_368); \
-  __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 1, 0); \
-  __ret_368; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f32(__p0_369, __p1_369) __extension__ ({ \
-  float32x4_t __s0_369 = __p0_369; \
-  float32x4_t __ret_369; \
-  __ret_369 = splatq_laneq_f32(__s0_369, __p1_369); \
-  __ret_369; \
-})
-#else
-#define vdupq_laneq_f32(__p0_370, __p1_370) __extension__ ({ \
-  float32x4_t __s0_370 = __p0_370; \
-  float32x4_t __rev0_370;  __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 3, 2, 1, 0); \
-  float32x4_t __ret_370; \
-  __ret_370 = __noswap_splatq_laneq_f32(__rev0_370, __p1_370); \
-  __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 3, 2, 1, 0); \
-  __ret_370; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_f16(__p0_371, __p1_371) __extension__ ({ \
-  float16x8_t __s0_371 = __p0_371; \
-  float16x8_t __ret_371; \
-  __ret_371 = splatq_laneq_f16(__s0_371, __p1_371); \
-  __ret_371; \
-})
-#else
-#define vdupq_laneq_f16(__p0_372, __p1_372) __extension__ ({ \
-  float16x8_t __s0_372 = __p0_372; \
-  float16x8_t __rev0_372;  __rev0_372 = __builtin_shufflevector(__s0_372, __s0_372, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_372; \
-  __ret_372 = __noswap_splatq_laneq_f16(__rev0_372, __p1_372); \
-  __ret_372 = __builtin_shufflevector(__ret_372, __ret_372, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_372; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s32(__p0_373, __p1_373) __extension__ ({ \
-  int32x4_t __s0_373 = __p0_373; \
-  int32x4_t __ret_373; \
-  __ret_373 = splatq_laneq_s32(__s0_373, __p1_373); \
-  __ret_373; \
-})
-#else
-#define vdupq_laneq_s32(__p0_374, __p1_374) __extension__ ({ \
-  int32x4_t __s0_374 = __p0_374; \
-  int32x4_t __rev0_374;  __rev0_374 = __builtin_shufflevector(__s0_374, __s0_374, 3, 2, 1, 0); \
-  int32x4_t __ret_374; \
-  __ret_374 = __noswap_splatq_laneq_s32(__rev0_374, __p1_374); \
-  __ret_374 = __builtin_shufflevector(__ret_374, __ret_374, 3, 2, 1, 0); \
-  __ret_374; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s64(__p0_375, __p1_375) __extension__ ({ \
-  int64x2_t __s0_375 = __p0_375; \
-  int64x2_t __ret_375; \
-  __ret_375 = splatq_laneq_s64(__s0_375, __p1_375); \
-  __ret_375; \
-})
-#else
-#define vdupq_laneq_s64(__p0_376, __p1_376) __extension__ ({ \
-  int64x2_t __s0_376 = __p0_376; \
-  int64x2_t __rev0_376;  __rev0_376 = __builtin_shufflevector(__s0_376, __s0_376, 1, 0); \
-  int64x2_t __ret_376; \
-  __ret_376 = __noswap_splatq_laneq_s64(__rev0_376, __p1_376); \
-  __ret_376 = __builtin_shufflevector(__ret_376, __ret_376, 1, 0); \
-  __ret_376; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdupq_laneq_s16(__p0_377, __p1_377) __extension__ ({ \
-  int16x8_t __s0_377 = __p0_377; \
-  int16x8_t __ret_377; \
-  __ret_377 = splatq_laneq_s16(__s0_377, __p1_377); \
-  __ret_377; \
-})
-#else
-#define vdupq_laneq_s16(__p0_378, __p1_378) __extension__ ({ \
-  int16x8_t __s0_378 = __p0_378; \
-  int16x8_t __rev0_378;  __rev0_378 = __builtin_shufflevector(__s0_378, __s0_378, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_378; \
-  __ret_378 = __noswap_splatq_laneq_s16(__rev0_378, __p1_378); \
-  __ret_378 = __builtin_shufflevector(__ret_378, __ret_378, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_378; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u8(__p0_379, __p1_379) __extension__ ({ \
-  uint8x16_t __s0_379 = __p0_379; \
-  uint8x8_t __ret_379; \
-  __ret_379 = splat_laneq_u8(__s0_379, __p1_379); \
-  __ret_379; \
-})
-#else
-#define vdup_laneq_u8(__p0_380, __p1_380) __extension__ ({ \
-  uint8x16_t __s0_380 = __p0_380; \
-  uint8x16_t __rev0_380;  __rev0_380 = __builtin_shufflevector(__s0_380, __s0_380, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __ret_380; \
-  __ret_380 = __noswap_splat_laneq_u8(__rev0_380, __p1_380); \
-  __ret_380 = __builtin_shufflevector(__ret_380, __ret_380, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_380; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u32(__p0_381, __p1_381) __extension__ ({ \
-  uint32x4_t __s0_381 = __p0_381; \
-  uint32x2_t __ret_381; \
-  __ret_381 = splat_laneq_u32(__s0_381, __p1_381); \
-  __ret_381; \
-})
-#else
-#define vdup_laneq_u32(__p0_382, __p1_382) __extension__ ({ \
-  uint32x4_t __s0_382 = __p0_382; \
-  uint32x4_t __rev0_382;  __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 3, 2, 1, 0); \
-  uint32x2_t __ret_382; \
-  __ret_382 = __noswap_splat_laneq_u32(__rev0_382, __p1_382); \
-  __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 1, 0); \
-  __ret_382; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u64(__p0_383, __p1_383) __extension__ ({ \
-  uint64x2_t __s0_383 = __p0_383; \
-  uint64x1_t __ret_383; \
-  __ret_383 = splat_laneq_u64(__s0_383, __p1_383); \
-  __ret_383; \
-})
-#else
-#define vdup_laneq_u64(__p0_384, __p1_384) __extension__ ({ \
-  uint64x2_t __s0_384 = __p0_384; \
-  uint64x2_t __rev0_384;  __rev0_384 = __builtin_shufflevector(__s0_384, __s0_384, 1, 0); \
-  uint64x1_t __ret_384; \
-  __ret_384 = __noswap_splat_laneq_u64(__rev0_384, __p1_384); \
-  __ret_384; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_u16(__p0_385, __p1_385) __extension__ ({ \
-  uint16x8_t __s0_385 = __p0_385; \
-  uint16x4_t __ret_385; \
-  __ret_385 = splat_laneq_u16(__s0_385, __p1_385); \
-  __ret_385; \
-})
-#else
-#define vdup_laneq_u16(__p0_386, __p1_386) __extension__ ({ \
-  uint16x8_t __s0_386 = __p0_386; \
-  uint16x8_t __rev0_386;  __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_386; \
-  __ret_386 = __noswap_splat_laneq_u16(__rev0_386, __p1_386); \
-  __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 3, 2, 1, 0); \
-  __ret_386; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s8(__p0_387, __p1_387) __extension__ ({ \
-  int8x16_t __s0_387 = __p0_387; \
-  int8x8_t __ret_387; \
-  __ret_387 = splat_laneq_s8(__s0_387, __p1_387); \
-  __ret_387; \
-})
-#else
-#define vdup_laneq_s8(__p0_388, __p1_388) __extension__ ({ \
-  int8x16_t __s0_388 = __p0_388; \
-  int8x16_t __rev0_388;  __rev0_388 = __builtin_shufflevector(__s0_388, __s0_388, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x8_t __ret_388; \
-  __ret_388 = __noswap_splat_laneq_s8(__rev0_388, __p1_388); \
-  __ret_388 = __builtin_shufflevector(__ret_388, __ret_388, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_388; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f64(__p0_389, __p1_389) __extension__ ({ \
-  float64x2_t __s0_389 = __p0_389; \
-  float64x1_t __ret_389; \
-  __ret_389 = splat_laneq_f64(__s0_389, __p1_389); \
-  __ret_389; \
-})
-#else
-#define vdup_laneq_f64(__p0_390, __p1_390) __extension__ ({ \
-  float64x2_t __s0_390 = __p0_390; \
-  float64x2_t __rev0_390;  __rev0_390 = __builtin_shufflevector(__s0_390, __s0_390, 1, 0); \
-  float64x1_t __ret_390; \
-  __ret_390 = __noswap_splat_laneq_f64(__rev0_390, __p1_390); \
-  __ret_390; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f32(__p0_391, __p1_391) __extension__ ({ \
-  float32x4_t __s0_391 = __p0_391; \
-  float32x2_t __ret_391; \
-  __ret_391 = splat_laneq_f32(__s0_391, __p1_391); \
-  __ret_391; \
-})
-#else
-#define vdup_laneq_f32(__p0_392, __p1_392) __extension__ ({ \
-  float32x4_t __s0_392 = __p0_392; \
-  float32x4_t __rev0_392;  __rev0_392 = __builtin_shufflevector(__s0_392, __s0_392, 3, 2, 1, 0); \
-  float32x2_t __ret_392; \
-  __ret_392 = __noswap_splat_laneq_f32(__rev0_392, __p1_392); \
-  __ret_392 = __builtin_shufflevector(__ret_392, __ret_392, 1, 0); \
-  __ret_392; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_f16(__p0_393, __p1_393) __extension__ ({ \
-  float16x8_t __s0_393 = __p0_393; \
-  float16x4_t __ret_393; \
-  __ret_393 = splat_laneq_f16(__s0_393, __p1_393); \
-  __ret_393; \
-})
-#else
-#define vdup_laneq_f16(__p0_394, __p1_394) __extension__ ({ \
-  float16x8_t __s0_394 = __p0_394; \
-  float16x8_t __rev0_394;  __rev0_394 = __builtin_shufflevector(__s0_394, __s0_394, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __ret_394; \
-  __ret_394 = __noswap_splat_laneq_f16(__rev0_394, __p1_394); \
-  __ret_394 = __builtin_shufflevector(__ret_394, __ret_394, 3, 2, 1, 0); \
-  __ret_394; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s32(__p0_395, __p1_395) __extension__ ({ \
-  int32x4_t __s0_395 = __p0_395; \
-  int32x2_t __ret_395; \
-  __ret_395 = splat_laneq_s32(__s0_395, __p1_395); \
-  __ret_395; \
-})
-#else
-#define vdup_laneq_s32(__p0_396, __p1_396) __extension__ ({ \
-  int32x4_t __s0_396 = __p0_396; \
-  int32x4_t __rev0_396;  __rev0_396 = __builtin_shufflevector(__s0_396, __s0_396, 3, 2, 1, 0); \
-  int32x2_t __ret_396; \
-  __ret_396 = __noswap_splat_laneq_s32(__rev0_396, __p1_396); \
-  __ret_396 = __builtin_shufflevector(__ret_396, __ret_396, 1, 0); \
-  __ret_396; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s64(__p0_397, __p1_397) __extension__ ({ \
-  int64x2_t __s0_397 = __p0_397; \
-  int64x1_t __ret_397; \
-  __ret_397 = splat_laneq_s64(__s0_397, __p1_397); \
-  __ret_397; \
-})
-#else
-#define vdup_laneq_s64(__p0_398, __p1_398) __extension__ ({ \
-  int64x2_t __s0_398 = __p0_398; \
-  int64x2_t __rev0_398;  __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 1, 0); \
-  int64x1_t __ret_398; \
-  __ret_398 = __noswap_splat_laneq_s64(__rev0_398, __p1_398); \
-  __ret_398; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vdup_laneq_s16(__p0_399, __p1_399) __extension__ ({ \
-  int16x8_t __s0_399 = __p0_399; \
-  int16x4_t __ret_399; \
-  __ret_399 = splat_laneq_s16(__s0_399, __p1_399); \
-  __ret_399; \
-})
-#else
-#define vdup_laneq_s16(__p0_400, __p1_400) __extension__ ({ \
-  int16x8_t __s0_400 = __p0_400; \
-  int16x8_t __rev0_400;  __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_400; \
-  __ret_400 = __noswap_splat_laneq_s16(__rev0_400, __p1_400); \
-  __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 3, 2, 1, 0); \
-  __ret_400; \
-})
-#endif
-
-__ai poly64x1_t vdup_n_p64(poly64_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai poly64x2_t vdupq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vdupq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float64x2_t vdupq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vdup_n_f64(float64_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) {__p0};
-  return __ret;
-}
-#define vext_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \
-  __ret; \
-})
-#else
-#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vext_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10);
-  return __ret;
-}
-#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
-  __ret; \
-})
-#else
-#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \
-  __ret; \
-})
-#endif
-
-#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
-  __ret; \
-})
-#else
-#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x2_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
-  __ret; \
-})
-#else
-#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \
-  __ret; \
-})
-#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32_t __s0 = __p0; \
-  float32_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32_t __ret; \
-  __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
-  __ret; \
-})
-#else
-#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x4_t __s0 = __p0; \
-  float32x4_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x4_t __ret; \
-  __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x2_t __s2 = __p2; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
-  __ret; \
-})
-#else
-#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  float32x2_t __s0 = __p0; \
-  float32x2_t __s1 = __p1; \
-  float32x4_t __s2 = __p2; \
-  float32x2_t __ret; \
-  __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2});
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, -__p1, __p2);
-  return __ret;
-}
-#else
-__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, -__p1, __p2);
-  return __ret;
-}
-#define vfmsd_lane_f64(__p0_401, __p1_401, __p2_401, __p3_401) __extension__ ({ \
-  float64_t __s0_401 = __p0_401; \
-  float64_t __s1_401 = __p1_401; \
-  float64x1_t __s2_401 = __p2_401; \
-  float64_t __ret_401; \
-  __ret_401 = vfmad_lane_f64(__s0_401, -__s1_401, __s2_401, __p3_401); \
-  __ret_401; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_lane_f32(__p0_402, __p1_402, __p2_402, __p3_402) __extension__ ({ \
-  float32_t __s0_402 = __p0_402; \
-  float32_t __s1_402 = __p1_402; \
-  float32x2_t __s2_402 = __p2_402; \
-  float32_t __ret_402; \
-  __ret_402 = vfmas_lane_f32(__s0_402, -__s1_402, __s2_402, __p3_402); \
-  __ret_402; \
-})
-#else
-#define vfmss_lane_f32(__p0_403, __p1_403, __p2_403, __p3_403) __extension__ ({ \
-  float32_t __s0_403 = __p0_403; \
-  float32_t __s1_403 = __p1_403; \
-  float32x2_t __s2_403 = __p2_403; \
-  float32x2_t __rev2_403;  __rev2_403 = __builtin_shufflevector(__s2_403, __s2_403, 1, 0); \
-  float32_t __ret_403; \
-  __ret_403 = __noswap_vfmas_lane_f32(__s0_403, -__s1_403, __rev2_403, __p3_403); \
-  __ret_403; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f64(__p0_404, __p1_404, __p2_404, __p3_404) __extension__ ({ \
-  float64x2_t __s0_404 = __p0_404; \
-  float64x2_t __s1_404 = __p1_404; \
-  float64x1_t __s2_404 = __p2_404; \
-  float64x2_t __ret_404; \
-  __ret_404 = vfmaq_lane_f64(__s0_404, -__s1_404, __s2_404, __p3_404); \
-  __ret_404; \
-})
-#else
-#define vfmsq_lane_f64(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \
-  float64x2_t __s0_405 = __p0_405; \
-  float64x2_t __s1_405 = __p1_405; \
-  float64x1_t __s2_405 = __p2_405; \
-  float64x2_t __rev0_405;  __rev0_405 = __builtin_shufflevector(__s0_405, __s0_405, 1, 0); \
-  float64x2_t __rev1_405;  __rev1_405 = __builtin_shufflevector(__s1_405, __s1_405, 1, 0); \
-  float64x2_t __ret_405; \
-  __ret_405 = __noswap_vfmaq_lane_f64(__rev0_405, -__rev1_405, __s2_405, __p3_405); \
-  __ret_405 = __builtin_shufflevector(__ret_405, __ret_405, 1, 0); \
-  __ret_405; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_lane_f32(__p0_406, __p1_406, __p2_406, __p3_406) __extension__ ({ \
-  float32x4_t __s0_406 = __p0_406; \
-  float32x4_t __s1_406 = __p1_406; \
-  float32x2_t __s2_406 = __p2_406; \
-  float32x4_t __ret_406; \
-  __ret_406 = vfmaq_lane_f32(__s0_406, -__s1_406, __s2_406, __p3_406); \
-  __ret_406; \
-})
-#else
-#define vfmsq_lane_f32(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \
-  float32x4_t __s0_407 = __p0_407; \
-  float32x4_t __s1_407 = __p1_407; \
-  float32x2_t __s2_407 = __p2_407; \
-  float32x4_t __rev0_407;  __rev0_407 = __builtin_shufflevector(__s0_407, __s0_407, 3, 2, 1, 0); \
-  float32x4_t __rev1_407;  __rev1_407 = __builtin_shufflevector(__s1_407, __s1_407, 3, 2, 1, 0); \
-  float32x2_t __rev2_407;  __rev2_407 = __builtin_shufflevector(__s2_407, __s2_407, 1, 0); \
-  float32x4_t __ret_407; \
-  __ret_407 = __noswap_vfmaq_lane_f32(__rev0_407, -__rev1_407, __rev2_407, __p3_407); \
-  __ret_407 = __builtin_shufflevector(__ret_407, __ret_407, 3, 2, 1, 0); \
-  __ret_407; \
-})
-#endif
-
-#define vfms_lane_f64(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \
-  float64x1_t __s0_408 = __p0_408; \
-  float64x1_t __s1_408 = __p1_408; \
-  float64x1_t __s2_408 = __p2_408; \
-  float64x1_t __ret_408; \
-  __ret_408 = vfma_lane_f64(__s0_408, -__s1_408, __s2_408, __p3_408); \
-  __ret_408; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vfms_lane_f32(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \
-  float32x2_t __s0_409 = __p0_409; \
-  float32x2_t __s1_409 = __p1_409; \
-  float32x2_t __s2_409 = __p2_409; \
-  float32x2_t __ret_409; \
-  __ret_409 = vfma_lane_f32(__s0_409, -__s1_409, __s2_409, __p3_409); \
-  __ret_409; \
-})
-#else
-#define vfms_lane_f32(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \
-  float32x2_t __s0_410 = __p0_410; \
-  float32x2_t __s1_410 = __p1_410; \
-  float32x2_t __s2_410 = __p2_410; \
-  float32x2_t __rev0_410;  __rev0_410 = __builtin_shufflevector(__s0_410, __s0_410, 1, 0); \
-  float32x2_t __rev1_410;  __rev1_410 = __builtin_shufflevector(__s1_410, __s1_410, 1, 0); \
-  float32x2_t __rev2_410;  __rev2_410 = __builtin_shufflevector(__s2_410, __s2_410, 1, 0); \
-  float32x2_t __ret_410; \
-  __ret_410 = __noswap_vfma_lane_f32(__rev0_410, -__rev1_410, __rev2_410, __p3_410); \
-  __ret_410 = __builtin_shufflevector(__ret_410, __ret_410, 1, 0); \
-  __ret_410; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsd_laneq_f64(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \
-  float64_t __s0_411 = __p0_411; \
-  float64_t __s1_411 = __p1_411; \
-  float64x2_t __s2_411 = __p2_411; \
-  float64_t __ret_411; \
-  __ret_411 = vfmad_laneq_f64(__s0_411, -__s1_411, __s2_411, __p3_411); \
-  __ret_411; \
-})
-#else
-#define vfmsd_laneq_f64(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \
-  float64_t __s0_412 = __p0_412; \
-  float64_t __s1_412 = __p1_412; \
-  float64x2_t __s2_412 = __p2_412; \
-  float64x2_t __rev2_412;  __rev2_412 = __builtin_shufflevector(__s2_412, __s2_412, 1, 0); \
-  float64_t __ret_412; \
-  __ret_412 = __noswap_vfmad_laneq_f64(__s0_412, -__s1_412, __rev2_412, __p3_412); \
-  __ret_412; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmss_laneq_f32(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \
-  float32_t __s0_413 = __p0_413; \
-  float32_t __s1_413 = __p1_413; \
-  float32x4_t __s2_413 = __p2_413; \
-  float32_t __ret_413; \
-  __ret_413 = vfmas_laneq_f32(__s0_413, -__s1_413, __s2_413, __p3_413); \
-  __ret_413; \
-})
-#else
-#define vfmss_laneq_f32(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \
-  float32_t __s0_414 = __p0_414; \
-  float32_t __s1_414 = __p1_414; \
-  float32x4_t __s2_414 = __p2_414; \
-  float32x4_t __rev2_414;  __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 3, 2, 1, 0); \
-  float32_t __ret_414; \
-  __ret_414 = __noswap_vfmas_laneq_f32(__s0_414, -__s1_414, __rev2_414, __p3_414); \
-  __ret_414; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f64(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \
-  float64x2_t __s0_415 = __p0_415; \
-  float64x2_t __s1_415 = __p1_415; \
-  float64x2_t __s2_415 = __p2_415; \
-  float64x2_t __ret_415; \
-  __ret_415 = vfmaq_laneq_f64(__s0_415, -__s1_415, __s2_415, __p3_415); \
-  __ret_415; \
-})
-#else
-#define vfmsq_laneq_f64(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \
-  float64x2_t __s0_416 = __p0_416; \
-  float64x2_t __s1_416 = __p1_416; \
-  float64x2_t __s2_416 = __p2_416; \
-  float64x2_t __rev0_416;  __rev0_416 = __builtin_shufflevector(__s0_416, __s0_416, 1, 0); \
-  float64x2_t __rev1_416;  __rev1_416 = __builtin_shufflevector(__s1_416, __s1_416, 1, 0); \
-  float64x2_t __rev2_416;  __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 1, 0); \
-  float64x2_t __ret_416; \
-  __ret_416 = __noswap_vfmaq_laneq_f64(__rev0_416, -__rev1_416, __rev2_416, __p3_416); \
-  __ret_416 = __builtin_shufflevector(__ret_416, __ret_416, 1, 0); \
-  __ret_416; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmsq_laneq_f32(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \
-  float32x4_t __s0_417 = __p0_417; \
-  float32x4_t __s1_417 = __p1_417; \
-  float32x4_t __s2_417 = __p2_417; \
-  float32x4_t __ret_417; \
-  __ret_417 = vfmaq_laneq_f32(__s0_417, -__s1_417, __s2_417, __p3_417); \
-  __ret_417; \
-})
-#else
-#define vfmsq_laneq_f32(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \
-  float32x4_t __s0_418 = __p0_418; \
-  float32x4_t __s1_418 = __p1_418; \
-  float32x4_t __s2_418 = __p2_418; \
-  float32x4_t __rev0_418;  __rev0_418 = __builtin_shufflevector(__s0_418, __s0_418, 3, 2, 1, 0); \
-  float32x4_t __rev1_418;  __rev1_418 = __builtin_shufflevector(__s1_418, __s1_418, 3, 2, 1, 0); \
-  float32x4_t __rev2_418;  __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 3, 2, 1, 0); \
-  float32x4_t __ret_418; \
-  __ret_418 = __noswap_vfmaq_laneq_f32(__rev0_418, -__rev1_418, __rev2_418, __p3_418); \
-  __ret_418 = __builtin_shufflevector(__ret_418, __ret_418, 3, 2, 1, 0); \
-  __ret_418; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f64(__p0_419, __p1_419, __p2_419, __p3_419) __extension__ ({ \
-  float64x1_t __s0_419 = __p0_419; \
-  float64x1_t __s1_419 = __p1_419; \
-  float64x2_t __s2_419 = __p2_419; \
-  float64x1_t __ret_419; \
-  __ret_419 = vfma_laneq_f64(__s0_419, -__s1_419, __s2_419, __p3_419); \
-  __ret_419; \
-})
-#else
-#define vfms_laneq_f64(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \
-  float64x1_t __s0_420 = __p0_420; \
-  float64x1_t __s1_420 = __p1_420; \
-  float64x2_t __s2_420 = __p2_420; \
-  float64x2_t __rev2_420;  __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 1, 0); \
-  float64x1_t __ret_420; \
-  __ret_420 = __noswap_vfma_laneq_f64(__s0_420, -__s1_420, __rev2_420, __p3_420); \
-  __ret_420; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfms_laneq_f32(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \
-  float32x2_t __s0_421 = __p0_421; \
-  float32x2_t __s1_421 = __p1_421; \
-  float32x4_t __s2_421 = __p2_421; \
-  float32x2_t __ret_421; \
-  __ret_421 = vfma_laneq_f32(__s0_421, -__s1_421, __s2_421, __p3_421); \
-  __ret_421; \
-})
-#else
-#define vfms_laneq_f32(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \
-  float32x2_t __s0_422 = __p0_422; \
-  float32x2_t __s1_422 = __p1_422; \
-  float32x4_t __s2_422 = __p2_422; \
-  float32x2_t __rev0_422;  __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 1, 0); \
-  float32x2_t __rev1_422;  __rev1_422 = __builtin_shufflevector(__s1_422, __s1_422, 1, 0); \
-  float32x4_t __rev2_422;  __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 3, 2, 1, 0); \
-  float32x2_t __ret_422; \
-  __ret_422 = __noswap_vfma_laneq_f32(__rev0_422, -__rev1_422, __rev2_422, __p3_422); \
-  __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 1, 0); \
-  __ret_422; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __ret;
-  __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) {
-  float64x1_t __ret;
-  __ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2});
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __ret;
-  __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai poly64x1_t vget_high_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-__ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x1_t vget_high_f64(float64x2_t __p0) {
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 1);
-  return __ret;
-}
-#else
-__ai float64x1_t vget_high_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 1);
-  return __ret;
-}
-#endif
-
-#define vget_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64_t __ret; \
-  __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#else
-#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \
-  __ret; \
-})
-#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \
-  __ret; \
-})
-#endif
-
-#define vget_lane_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64_t __ret; \
-  __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai poly64x1_t vget_low_p64(poly64x2_t __p0) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x1_t vget_low_f64(float64x2_t __p0) {
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p0, 0);
-  return __ret;
-}
-#else
-__ai float64x1_t vget_low_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x1_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev0, 0);
-  return __ret;
-}
-#endif
-
-#define vld1_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \
-  __ret; \
-})
-#define vld1_dup_p64(__p0) __extension__ ({ \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_dup_p64(__p0) __extension__ ({ \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_dup_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_dup_f64(__p0) __extension__ ({ \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_dup_f64(__p0) __extension__ ({ \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \
-  __ret; \
-})
-#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
-  __ret; \
-})
-#else
-#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#define vld1_p64_x2(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x2(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x2(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x2(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x2(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x2(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld1_p64_x3(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x3(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x3(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x3(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x3(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x3(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld1_p64_x4(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_p64_x4(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld1q_p64_x4(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld1q_f64_x4(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld1q_f64_x4(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld1_f64_x4(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_p64(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld2q_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld2q_u64(__p0) __extension__ ({ \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld2q_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld2q_s64(__p0) __extension__ ({ \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_f64(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_dup_p64(__p0) __extension__ ({ \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld2q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_dup_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld2q_dup_f64(__p0) __extension__ ({ \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_dup_f64(__p0) __extension__ ({ \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  poly64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  poly64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  uint64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  float64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  int64x2x2_t __ret; \
-  __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  uint64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
-  __ret; \
-})
-#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  float64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
-  __ret; \
-})
-#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  int64x1x2_t __ret; \
-  __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
-  __ret; \
-})
-#define vld3_p64(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld3q_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld3q_u64(__p0) __extension__ ({ \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld3q_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld3q_s64(__p0) __extension__ ({ \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_f64(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld3_dup_p64(__p0) __extension__ ({ \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld3q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_dup_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld3q_dup_f64(__p0) __extension__ ({ \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_dup_f64(__p0) __extension__ ({ \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  poly64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  poly64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  uint64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  float64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  int64x2x3_t __ret; \
-  __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  uint64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
-  __ret; \
-})
-#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  float64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
-  __ret; \
-})
-#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  int64x1x3_t __ret; \
-  __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
-  __ret; \
-})
-#define vld4_p64(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld4q_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
-  __ret; \
-})
-#else
-#define vld4q_u64(__p0) __extension__ ({ \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld4q_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
-  __ret; \
-})
-#else
-#define vld4q_s64(__p0) __extension__ ({ \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_v(&__ret, __p0, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_f64(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld4_dup_p64(__p0) __extension__ ({ \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
-  __ret; \
-})
-#else
-#define vld4q_dup_p64(__p0) __extension__ ({ \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_dup_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
-  __ret; \
-})
-#else
-#define vld4q_dup_f64(__p0) __extension__ ({ \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_dup_f64(__p0) __extension__ ({ \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \
-  __ret; \
-})
-#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  poly64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  poly8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
-  __ret; \
-})
-#else
-#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  poly64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
-  __ret; \
-})
-#else
-#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  uint64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
-  __ret; \
-})
-#else
-#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  float64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
-  __ret; \
-})
-#else
-#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  int64x2x4_t __ret; \
-  __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
- \
-  __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \
-  __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \
-  __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \
-  __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  uint64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
-  __ret; \
-})
-#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  float64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
-  __ret; \
-})
-#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  int64x1x4_t __ret; \
-  __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
-  __ret; \
-})
-#define vldrq_p128(__p0) __extension__ ({ \
-  poly128_t __ret; \
-  __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vmaxnmvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxnmvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxnmv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vmaxvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vmaxvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vmaxvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vmaxvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vmaxvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vmaxvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vmaxvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vmaxvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vmaxvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vmaxvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vmaxvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vmaxv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vmaxv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vmaxv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vmaxv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vmaxv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vmaxv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vmaxv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vmaxv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vmaxv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vmaxv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vmaxv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vminnmvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vminnmvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminnmvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminnmvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminnmv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminnmv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vminvq_u8(uint8x16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vminvq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vminvq_u32(uint32x4_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vminvq_u32(uint32x4_t __p0) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vminvq_u16(uint16x8_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vminvq_u16(uint16x8_t __p0) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vminvq_s8(int8x16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vminvq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vminvq_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vminvq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminvq_f32(float32x4_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminvq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vminvq_s32(int32x4_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vminvq_s32(int32x4_t __p0) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vminvq_s16(int16x8_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vminvq_s16(int16x8_t __p0) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8_t vminv_u8(uint8x8_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8(__p0);
-  return __ret;
-}
-#else
-__ai uint8_t vminv_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32_t vminv_u32(uint32x2_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32(__p0);
-  return __ret;
-}
-#else
-__ai uint32_t vminv_u32(uint32x2_t __p0) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16_t vminv_u16(uint16x4_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16(__p0);
-  return __ret;
-}
-#else
-__ai uint16_t vminv_u16(uint16x4_t __p0) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8_t vminv_s8(int8x8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8(__p0);
-  return __ret;
-}
-#else
-__ai int8_t vminv_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vminv_s8(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vminv_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vminv_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vminv_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32_t vminv_s32(int32x2_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32(__p0);
-  return __ret;
-}
-#else
-__ai int32_t vminv_s32(int32x2_t __p0) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vminv_s32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16_t vminv_s16(int16x4_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16(__p0);
-  return __ret;
-}
-#else
-__ai int16_t vminv_s16(int16x4_t __p0) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vminv_s16(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = __p0 + __p1 * __p2;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u32(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \
-  uint32x4_t __s0_423 = __p0_423; \
-  uint32x4_t __s1_423 = __p1_423; \
-  uint32x4_t __s2_423 = __p2_423; \
-  uint32x4_t __ret_423; \
-  __ret_423 = __s0_423 + __s1_423 * splatq_laneq_u32(__s2_423, __p3_423); \
-  __ret_423; \
-})
-#else
-#define vmlaq_laneq_u32(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \
-  uint32x4_t __s0_424 = __p0_424; \
-  uint32x4_t __s1_424 = __p1_424; \
-  uint32x4_t __s2_424 = __p2_424; \
-  uint32x4_t __rev0_424;  __rev0_424 = __builtin_shufflevector(__s0_424, __s0_424, 3, 2, 1, 0); \
-  uint32x4_t __rev1_424;  __rev1_424 = __builtin_shufflevector(__s1_424, __s1_424, 3, 2, 1, 0); \
-  uint32x4_t __rev2_424;  __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 3, 2, 1, 0); \
-  uint32x4_t __ret_424; \
-  __ret_424 = __rev0_424 + __rev1_424 * __noswap_splatq_laneq_u32(__rev2_424, __p3_424); \
-  __ret_424 = __builtin_shufflevector(__ret_424, __ret_424, 3, 2, 1, 0); \
-  __ret_424; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_u16(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \
-  uint16x8_t __s0_425 = __p0_425; \
-  uint16x8_t __s1_425 = __p1_425; \
-  uint16x8_t __s2_425 = __p2_425; \
-  uint16x8_t __ret_425; \
-  __ret_425 = __s0_425 + __s1_425 * splatq_laneq_u16(__s2_425, __p3_425); \
-  __ret_425; \
-})
-#else
-#define vmlaq_laneq_u16(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \
-  uint16x8_t __s0_426 = __p0_426; \
-  uint16x8_t __s1_426 = __p1_426; \
-  uint16x8_t __s2_426 = __p2_426; \
-  uint16x8_t __rev0_426;  __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_426;  __rev1_426 = __builtin_shufflevector(__s1_426, __s1_426, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_426;  __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_426; \
-  __ret_426 = __rev0_426 + __rev1_426 * __noswap_splatq_laneq_u16(__rev2_426, __p3_426); \
-  __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_426; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_f32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \
-  float32x4_t __s0_427 = __p0_427; \
-  float32x4_t __s1_427 = __p1_427; \
-  float32x4_t __s2_427 = __p2_427; \
-  float32x4_t __ret_427; \
-  __ret_427 = __s0_427 + __s1_427 * splatq_laneq_f32(__s2_427, __p3_427); \
-  __ret_427; \
-})
-#else
-#define vmlaq_laneq_f32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \
-  float32x4_t __s0_428 = __p0_428; \
-  float32x4_t __s1_428 = __p1_428; \
-  float32x4_t __s2_428 = __p2_428; \
-  float32x4_t __rev0_428;  __rev0_428 = __builtin_shufflevector(__s0_428, __s0_428, 3, 2, 1, 0); \
-  float32x4_t __rev1_428;  __rev1_428 = __builtin_shufflevector(__s1_428, __s1_428, 3, 2, 1, 0); \
-  float32x4_t __rev2_428;  __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 3, 2, 1, 0); \
-  float32x4_t __ret_428; \
-  __ret_428 = __rev0_428 + __rev1_428 * __noswap_splatq_laneq_f32(__rev2_428, __p3_428); \
-  __ret_428 = __builtin_shufflevector(__ret_428, __ret_428, 3, 2, 1, 0); \
-  __ret_428; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s32(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \
-  int32x4_t __s0_429 = __p0_429; \
-  int32x4_t __s1_429 = __p1_429; \
-  int32x4_t __s2_429 = __p2_429; \
-  int32x4_t __ret_429; \
-  __ret_429 = __s0_429 + __s1_429 * splatq_laneq_s32(__s2_429, __p3_429); \
-  __ret_429; \
-})
-#else
-#define vmlaq_laneq_s32(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \
-  int32x4_t __s0_430 = __p0_430; \
-  int32x4_t __s1_430 = __p1_430; \
-  int32x4_t __s2_430 = __p2_430; \
-  int32x4_t __rev0_430;  __rev0_430 = __builtin_shufflevector(__s0_430, __s0_430, 3, 2, 1, 0); \
-  int32x4_t __rev1_430;  __rev1_430 = __builtin_shufflevector(__s1_430, __s1_430, 3, 2, 1, 0); \
-  int32x4_t __rev2_430;  __rev2_430 = __builtin_shufflevector(__s2_430, __s2_430, 3, 2, 1, 0); \
-  int32x4_t __ret_430; \
-  __ret_430 = __rev0_430 + __rev1_430 * __noswap_splatq_laneq_s32(__rev2_430, __p3_430); \
-  __ret_430 = __builtin_shufflevector(__ret_430, __ret_430, 3, 2, 1, 0); \
-  __ret_430; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlaq_laneq_s16(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \
-  int16x8_t __s0_431 = __p0_431; \
-  int16x8_t __s1_431 = __p1_431; \
-  int16x8_t __s2_431 = __p2_431; \
-  int16x8_t __ret_431; \
-  __ret_431 = __s0_431 + __s1_431 * splatq_laneq_s16(__s2_431, __p3_431); \
-  __ret_431; \
-})
-#else
-#define vmlaq_laneq_s16(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \
-  int16x8_t __s0_432 = __p0_432; \
-  int16x8_t __s1_432 = __p1_432; \
-  int16x8_t __s2_432 = __p2_432; \
-  int16x8_t __rev0_432;  __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_432;  __rev1_432 = __builtin_shufflevector(__s1_432, __s1_432, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_432;  __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_432; \
-  __ret_432 = __rev0_432 + __rev1_432 * __noswap_splatq_laneq_s16(__rev2_432, __p3_432); \
-  __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_432; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u32(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \
-  uint32x2_t __s0_433 = __p0_433; \
-  uint32x2_t __s1_433 = __p1_433; \
-  uint32x4_t __s2_433 = __p2_433; \
-  uint32x2_t __ret_433; \
-  __ret_433 = __s0_433 + __s1_433 * splat_laneq_u32(__s2_433, __p3_433); \
-  __ret_433; \
-})
-#else
-#define vmla_laneq_u32(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \
-  uint32x2_t __s0_434 = __p0_434; \
-  uint32x2_t __s1_434 = __p1_434; \
-  uint32x4_t __s2_434 = __p2_434; \
-  uint32x2_t __rev0_434;  __rev0_434 = __builtin_shufflevector(__s0_434, __s0_434, 1, 0); \
-  uint32x2_t __rev1_434;  __rev1_434 = __builtin_shufflevector(__s1_434, __s1_434, 1, 0); \
-  uint32x4_t __rev2_434;  __rev2_434 = __builtin_shufflevector(__s2_434, __s2_434, 3, 2, 1, 0); \
-  uint32x2_t __ret_434; \
-  __ret_434 = __rev0_434 + __rev1_434 * __noswap_splat_laneq_u32(__rev2_434, __p3_434); \
-  __ret_434 = __builtin_shufflevector(__ret_434, __ret_434, 1, 0); \
-  __ret_434; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_u16(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \
-  uint16x4_t __s0_435 = __p0_435; \
-  uint16x4_t __s1_435 = __p1_435; \
-  uint16x8_t __s2_435 = __p2_435; \
-  uint16x4_t __ret_435; \
-  __ret_435 = __s0_435 + __s1_435 * splat_laneq_u16(__s2_435, __p3_435); \
-  __ret_435; \
-})
-#else
-#define vmla_laneq_u16(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \
-  uint16x4_t __s0_436 = __p0_436; \
-  uint16x4_t __s1_436 = __p1_436; \
-  uint16x8_t __s2_436 = __p2_436; \
-  uint16x4_t __rev0_436;  __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 3, 2, 1, 0); \
-  uint16x4_t __rev1_436;  __rev1_436 = __builtin_shufflevector(__s1_436, __s1_436, 3, 2, 1, 0); \
-  uint16x8_t __rev2_436;  __rev2_436 = __builtin_shufflevector(__s2_436, __s2_436, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_436; \
-  __ret_436 = __rev0_436 + __rev1_436 * __noswap_splat_laneq_u16(__rev2_436, __p3_436); \
-  __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 3, 2, 1, 0); \
-  __ret_436; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_f32(__p0_437, __p1_437, __p2_437, __p3_437) __extension__ ({ \
-  float32x2_t __s0_437 = __p0_437; \
-  float32x2_t __s1_437 = __p1_437; \
-  float32x4_t __s2_437 = __p2_437; \
-  float32x2_t __ret_437; \
-  __ret_437 = __s0_437 + __s1_437 * splat_laneq_f32(__s2_437, __p3_437); \
-  __ret_437; \
-})
-#else
-#define vmla_laneq_f32(__p0_438, __p1_438, __p2_438, __p3_438) __extension__ ({ \
-  float32x2_t __s0_438 = __p0_438; \
-  float32x2_t __s1_438 = __p1_438; \
-  float32x4_t __s2_438 = __p2_438; \
-  float32x2_t __rev0_438;  __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 1, 0); \
-  float32x2_t __rev1_438;  __rev1_438 = __builtin_shufflevector(__s1_438, __s1_438, 1, 0); \
-  float32x4_t __rev2_438;  __rev2_438 = __builtin_shufflevector(__s2_438, __s2_438, 3, 2, 1, 0); \
-  float32x2_t __ret_438; \
-  __ret_438 = __rev0_438 + __rev1_438 * __noswap_splat_laneq_f32(__rev2_438, __p3_438); \
-  __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 1, 0); \
-  __ret_438; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s32(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \
-  int32x2_t __s0_439 = __p0_439; \
-  int32x2_t __s1_439 = __p1_439; \
-  int32x4_t __s2_439 = __p2_439; \
-  int32x2_t __ret_439; \
-  __ret_439 = __s0_439 + __s1_439 * splat_laneq_s32(__s2_439, __p3_439); \
-  __ret_439; \
-})
-#else
-#define vmla_laneq_s32(__p0_440, __p1_440, __p2_440, __p3_440) __extension__ ({ \
-  int32x2_t __s0_440 = __p0_440; \
-  int32x2_t __s1_440 = __p1_440; \
-  int32x4_t __s2_440 = __p2_440; \
-  int32x2_t __rev0_440;  __rev0_440 = __builtin_shufflevector(__s0_440, __s0_440, 1, 0); \
-  int32x2_t __rev1_440;  __rev1_440 = __builtin_shufflevector(__s1_440, __s1_440, 1, 0); \
-  int32x4_t __rev2_440;  __rev2_440 = __builtin_shufflevector(__s2_440, __s2_440, 3, 2, 1, 0); \
-  int32x2_t __ret_440; \
-  __ret_440 = __rev0_440 + __rev1_440 * __noswap_splat_laneq_s32(__rev2_440, __p3_440); \
-  __ret_440 = __builtin_shufflevector(__ret_440, __ret_440, 1, 0); \
-  __ret_440; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmla_laneq_s16(__p0_441, __p1_441, __p2_441, __p3_441) __extension__ ({ \
-  int16x4_t __s0_441 = __p0_441; \
-  int16x4_t __s1_441 = __p1_441; \
-  int16x8_t __s2_441 = __p2_441; \
-  int16x4_t __ret_441; \
-  __ret_441 = __s0_441 + __s1_441 * splat_laneq_s16(__s2_441, __p3_441); \
-  __ret_441; \
-})
-#else
-#define vmla_laneq_s16(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \
-  int16x4_t __s0_442 = __p0_442; \
-  int16x4_t __s1_442 = __p1_442; \
-  int16x8_t __s2_442 = __p2_442; \
-  int16x4_t __rev0_442;  __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 3, 2, 1, 0); \
-  int16x4_t __rev1_442;  __rev1_442 = __builtin_shufflevector(__s1_442, __s1_442, 3, 2, 1, 0); \
-  int16x8_t __rev2_442;  __rev2_442 = __builtin_shufflevector(__s2_442, __s2_442, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_442; \
-  __ret_442 = __rev0_442 + __rev1_442 * __noswap_splat_laneq_s16(__rev2_442, __p3_442); \
-  __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 3, 2, 1, 0); \
-  __ret_442; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 + __p1 * (float64x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float64x2_t vmlaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 + __rev1 * (float64x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u32(__p0_443, __p1_443, __p2_443, __p3_443) __extension__ ({ \
-  uint64x2_t __s0_443 = __p0_443; \
-  uint32x4_t __s1_443 = __p1_443; \
-  uint32x2_t __s2_443 = __p2_443; \
-  uint64x2_t __ret_443; \
-  __ret_443 = __s0_443 + vmull_u32(vget_high_u32(__s1_443), splat_lane_u32(__s2_443, __p3_443)); \
-  __ret_443; \
-})
-#else
-#define vmlal_high_lane_u32(__p0_444, __p1_444, __p2_444, __p3_444) __extension__ ({ \
-  uint64x2_t __s0_444 = __p0_444; \
-  uint32x4_t __s1_444 = __p1_444; \
-  uint32x2_t __s2_444 = __p2_444; \
-  uint64x2_t __rev0_444;  __rev0_444 = __builtin_shufflevector(__s0_444, __s0_444, 1, 0); \
-  uint32x4_t __rev1_444;  __rev1_444 = __builtin_shufflevector(__s1_444, __s1_444, 3, 2, 1, 0); \
-  uint32x2_t __rev2_444;  __rev2_444 = __builtin_shufflevector(__s2_444, __s2_444, 1, 0); \
-  uint64x2_t __ret_444; \
-  __ret_444 = __rev0_444 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_444), __noswap_splat_lane_u32(__rev2_444, __p3_444)); \
-  __ret_444 = __builtin_shufflevector(__ret_444, __ret_444, 1, 0); \
-  __ret_444; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_u16(__p0_445, __p1_445, __p2_445, __p3_445) __extension__ ({ \
-  uint32x4_t __s0_445 = __p0_445; \
-  uint16x8_t __s1_445 = __p1_445; \
-  uint16x4_t __s2_445 = __p2_445; \
-  uint32x4_t __ret_445; \
-  __ret_445 = __s0_445 + vmull_u16(vget_high_u16(__s1_445), splat_lane_u16(__s2_445, __p3_445)); \
-  __ret_445; \
-})
-#else
-#define vmlal_high_lane_u16(__p0_446, __p1_446, __p2_446, __p3_446) __extension__ ({ \
-  uint32x4_t __s0_446 = __p0_446; \
-  uint16x8_t __s1_446 = __p1_446; \
-  uint16x4_t __s2_446 = __p2_446; \
-  uint32x4_t __rev0_446;  __rev0_446 = __builtin_shufflevector(__s0_446, __s0_446, 3, 2, 1, 0); \
-  uint16x8_t __rev1_446;  __rev1_446 = __builtin_shufflevector(__s1_446, __s1_446, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_446;  __rev2_446 = __builtin_shufflevector(__s2_446, __s2_446, 3, 2, 1, 0); \
-  uint32x4_t __ret_446; \
-  __ret_446 = __rev0_446 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_446), __noswap_splat_lane_u16(__rev2_446, __p3_446)); \
-  __ret_446 = __builtin_shufflevector(__ret_446, __ret_446, 3, 2, 1, 0); \
-  __ret_446; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s32(__p0_447, __p1_447, __p2_447, __p3_447) __extension__ ({ \
-  int64x2_t __s0_447 = __p0_447; \
-  int32x4_t __s1_447 = __p1_447; \
-  int32x2_t __s2_447 = __p2_447; \
-  int64x2_t __ret_447; \
-  __ret_447 = __s0_447 + vmull_s32(vget_high_s32(__s1_447), splat_lane_s32(__s2_447, __p3_447)); \
-  __ret_447; \
-})
-#else
-#define vmlal_high_lane_s32(__p0_448, __p1_448, __p2_448, __p3_448) __extension__ ({ \
-  int64x2_t __s0_448 = __p0_448; \
-  int32x4_t __s1_448 = __p1_448; \
-  int32x2_t __s2_448 = __p2_448; \
-  int64x2_t __rev0_448;  __rev0_448 = __builtin_shufflevector(__s0_448, __s0_448, 1, 0); \
-  int32x4_t __rev1_448;  __rev1_448 = __builtin_shufflevector(__s1_448, __s1_448, 3, 2, 1, 0); \
-  int32x2_t __rev2_448;  __rev2_448 = __builtin_shufflevector(__s2_448, __s2_448, 1, 0); \
-  int64x2_t __ret_448; \
-  __ret_448 = __rev0_448 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_448), __noswap_splat_lane_s32(__rev2_448, __p3_448)); \
-  __ret_448 = __builtin_shufflevector(__ret_448, __ret_448, 1, 0); \
-  __ret_448; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_lane_s16(__p0_449, __p1_449, __p2_449, __p3_449) __extension__ ({ \
-  int32x4_t __s0_449 = __p0_449; \
-  int16x8_t __s1_449 = __p1_449; \
-  int16x4_t __s2_449 = __p2_449; \
-  int32x4_t __ret_449; \
-  __ret_449 = __s0_449 + vmull_s16(vget_high_s16(__s1_449), splat_lane_s16(__s2_449, __p3_449)); \
-  __ret_449; \
-})
-#else
-#define vmlal_high_lane_s16(__p0_450, __p1_450, __p2_450, __p3_450) __extension__ ({ \
-  int32x4_t __s0_450 = __p0_450; \
-  int16x8_t __s1_450 = __p1_450; \
-  int16x4_t __s2_450 = __p2_450; \
-  int32x4_t __rev0_450;  __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 3, 2, 1, 0); \
-  int16x8_t __rev1_450;  __rev1_450 = __builtin_shufflevector(__s1_450, __s1_450, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_450;  __rev2_450 = __builtin_shufflevector(__s2_450, __s2_450, 3, 2, 1, 0); \
-  int32x4_t __ret_450; \
-  __ret_450 = __rev0_450 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_450), __noswap_splat_lane_s16(__rev2_450, __p3_450)); \
-  __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 3, 2, 1, 0); \
-  __ret_450; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u32(__p0_451, __p1_451, __p2_451, __p3_451) __extension__ ({ \
-  uint64x2_t __s0_451 = __p0_451; \
-  uint32x4_t __s1_451 = __p1_451; \
-  uint32x4_t __s2_451 = __p2_451; \
-  uint64x2_t __ret_451; \
-  __ret_451 = __s0_451 + vmull_u32(vget_high_u32(__s1_451), splat_laneq_u32(__s2_451, __p3_451)); \
-  __ret_451; \
-})
-#else
-#define vmlal_high_laneq_u32(__p0_452, __p1_452, __p2_452, __p3_452) __extension__ ({ \
-  uint64x2_t __s0_452 = __p0_452; \
-  uint32x4_t __s1_452 = __p1_452; \
-  uint32x4_t __s2_452 = __p2_452; \
-  uint64x2_t __rev0_452;  __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 1, 0); \
-  uint32x4_t __rev1_452;  __rev1_452 = __builtin_shufflevector(__s1_452, __s1_452, 3, 2, 1, 0); \
-  uint32x4_t __rev2_452;  __rev2_452 = __builtin_shufflevector(__s2_452, __s2_452, 3, 2, 1, 0); \
-  uint64x2_t __ret_452; \
-  __ret_452 = __rev0_452 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_452), __noswap_splat_laneq_u32(__rev2_452, __p3_452)); \
-  __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 1, 0); \
-  __ret_452; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_u16(__p0_453, __p1_453, __p2_453, __p3_453) __extension__ ({ \
-  uint32x4_t __s0_453 = __p0_453; \
-  uint16x8_t __s1_453 = __p1_453; \
-  uint16x8_t __s2_453 = __p2_453; \
-  uint32x4_t __ret_453; \
-  __ret_453 = __s0_453 + vmull_u16(vget_high_u16(__s1_453), splat_laneq_u16(__s2_453, __p3_453)); \
-  __ret_453; \
-})
-#else
-#define vmlal_high_laneq_u16(__p0_454, __p1_454, __p2_454, __p3_454) __extension__ ({ \
-  uint32x4_t __s0_454 = __p0_454; \
-  uint16x8_t __s1_454 = __p1_454; \
-  uint16x8_t __s2_454 = __p2_454; \
-  uint32x4_t __rev0_454;  __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 3, 2, 1, 0); \
-  uint16x8_t __rev1_454;  __rev1_454 = __builtin_shufflevector(__s1_454, __s1_454, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_454;  __rev2_454 = __builtin_shufflevector(__s2_454, __s2_454, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_454; \
-  __ret_454 = __rev0_454 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_454), __noswap_splat_laneq_u16(__rev2_454, __p3_454)); \
-  __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 3, 2, 1, 0); \
-  __ret_454; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s32(__p0_455, __p1_455, __p2_455, __p3_455) __extension__ ({ \
-  int64x2_t __s0_455 = __p0_455; \
-  int32x4_t __s1_455 = __p1_455; \
-  int32x4_t __s2_455 = __p2_455; \
-  int64x2_t __ret_455; \
-  __ret_455 = __s0_455 + vmull_s32(vget_high_s32(__s1_455), splat_laneq_s32(__s2_455, __p3_455)); \
-  __ret_455; \
-})
-#else
-#define vmlal_high_laneq_s32(__p0_456, __p1_456, __p2_456, __p3_456) __extension__ ({ \
-  int64x2_t __s0_456 = __p0_456; \
-  int32x4_t __s1_456 = __p1_456; \
-  int32x4_t __s2_456 = __p2_456; \
-  int64x2_t __rev0_456;  __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 1, 0); \
-  int32x4_t __rev1_456;  __rev1_456 = __builtin_shufflevector(__s1_456, __s1_456, 3, 2, 1, 0); \
-  int32x4_t __rev2_456;  __rev2_456 = __builtin_shufflevector(__s2_456, __s2_456, 3, 2, 1, 0); \
-  int64x2_t __ret_456; \
-  __ret_456 = __rev0_456 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_456), __noswap_splat_laneq_s32(__rev2_456, __p3_456)); \
-  __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 1, 0); \
-  __ret_456; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_high_laneq_s16(__p0_457, __p1_457, __p2_457, __p3_457) __extension__ ({ \
-  int32x4_t __s0_457 = __p0_457; \
-  int16x8_t __s1_457 = __p1_457; \
-  int16x8_t __s2_457 = __p2_457; \
-  int32x4_t __ret_457; \
-  __ret_457 = __s0_457 + vmull_s16(vget_high_s16(__s1_457), splat_laneq_s16(__s2_457, __p3_457)); \
-  __ret_457; \
-})
-#else
-#define vmlal_high_laneq_s16(__p0_458, __p1_458, __p2_458, __p3_458) __extension__ ({ \
-  int32x4_t __s0_458 = __p0_458; \
-  int16x8_t __s1_458 = __p1_458; \
-  int16x8_t __s2_458 = __p2_458; \
-  int32x4_t __rev0_458;  __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 3, 2, 1, 0); \
-  int16x8_t __rev1_458;  __rev1_458 = __builtin_shufflevector(__s1_458, __s1_458, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_458;  __rev2_458 = __builtin_shufflevector(__s2_458, __s2_458, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_458; \
-  __ret_458 = __rev0_458 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_458), __noswap_splat_laneq_s16(__rev2_458, __p3_458)); \
-  __ret_458 = __builtin_shufflevector(__ret_458, __ret_458, 3, 2, 1, 0); \
-  __ret_458; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u32(__p0_459, __p1_459, __p2_459, __p3_459) __extension__ ({ \
-  uint64x2_t __s0_459 = __p0_459; \
-  uint32x2_t __s1_459 = __p1_459; \
-  uint32x4_t __s2_459 = __p2_459; \
-  uint64x2_t __ret_459; \
-  __ret_459 = __s0_459 + vmull_u32(__s1_459, splat_laneq_u32(__s2_459, __p3_459)); \
-  __ret_459; \
-})
-#else
-#define vmlal_laneq_u32(__p0_460, __p1_460, __p2_460, __p3_460) __extension__ ({ \
-  uint64x2_t __s0_460 = __p0_460; \
-  uint32x2_t __s1_460 = __p1_460; \
-  uint32x4_t __s2_460 = __p2_460; \
-  uint64x2_t __rev0_460;  __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 1, 0); \
-  uint32x2_t __rev1_460;  __rev1_460 = __builtin_shufflevector(__s1_460, __s1_460, 1, 0); \
-  uint32x4_t __rev2_460;  __rev2_460 = __builtin_shufflevector(__s2_460, __s2_460, 3, 2, 1, 0); \
-  uint64x2_t __ret_460; \
-  __ret_460 = __rev0_460 + __noswap_vmull_u32(__rev1_460, __noswap_splat_laneq_u32(__rev2_460, __p3_460)); \
-  __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 1, 0); \
-  __ret_460; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_u16(__p0_461, __p1_461, __p2_461, __p3_461) __extension__ ({ \
-  uint32x4_t __s0_461 = __p0_461; \
-  uint16x4_t __s1_461 = __p1_461; \
-  uint16x8_t __s2_461 = __p2_461; \
-  uint32x4_t __ret_461; \
-  __ret_461 = __s0_461 + vmull_u16(__s1_461, splat_laneq_u16(__s2_461, __p3_461)); \
-  __ret_461; \
-})
-#else
-#define vmlal_laneq_u16(__p0_462, __p1_462, __p2_462, __p3_462) __extension__ ({ \
-  uint32x4_t __s0_462 = __p0_462; \
-  uint16x4_t __s1_462 = __p1_462; \
-  uint16x8_t __s2_462 = __p2_462; \
-  uint32x4_t __rev0_462;  __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 3, 2, 1, 0); \
-  uint16x4_t __rev1_462;  __rev1_462 = __builtin_shufflevector(__s1_462, __s1_462, 3, 2, 1, 0); \
-  uint16x8_t __rev2_462;  __rev2_462 = __builtin_shufflevector(__s2_462, __s2_462, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_462; \
-  __ret_462 = __rev0_462 + __noswap_vmull_u16(__rev1_462, __noswap_splat_laneq_u16(__rev2_462, __p3_462)); \
-  __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 3, 2, 1, 0); \
-  __ret_462; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s32(__p0_463, __p1_463, __p2_463, __p3_463) __extension__ ({ \
-  int64x2_t __s0_463 = __p0_463; \
-  int32x2_t __s1_463 = __p1_463; \
-  int32x4_t __s2_463 = __p2_463; \
-  int64x2_t __ret_463; \
-  __ret_463 = __s0_463 + vmull_s32(__s1_463, splat_laneq_s32(__s2_463, __p3_463)); \
-  __ret_463; \
-})
-#else
-#define vmlal_laneq_s32(__p0_464, __p1_464, __p2_464, __p3_464) __extension__ ({ \
-  int64x2_t __s0_464 = __p0_464; \
-  int32x2_t __s1_464 = __p1_464; \
-  int32x4_t __s2_464 = __p2_464; \
-  int64x2_t __rev0_464;  __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 1, 0); \
-  int32x2_t __rev1_464;  __rev1_464 = __builtin_shufflevector(__s1_464, __s1_464, 1, 0); \
-  int32x4_t __rev2_464;  __rev2_464 = __builtin_shufflevector(__s2_464, __s2_464, 3, 2, 1, 0); \
-  int64x2_t __ret_464; \
-  __ret_464 = __rev0_464 + __noswap_vmull_s32(__rev1_464, __noswap_splat_laneq_s32(__rev2_464, __p3_464)); \
-  __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 1, 0); \
-  __ret_464; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_laneq_s16(__p0_465, __p1_465, __p2_465, __p3_465) __extension__ ({ \
-  int32x4_t __s0_465 = __p0_465; \
-  int16x4_t __s1_465 = __p1_465; \
-  int16x8_t __s2_465 = __p2_465; \
-  int32x4_t __ret_465; \
-  __ret_465 = __s0_465 + vmull_s16(__s1_465, splat_laneq_s16(__s2_465, __p3_465)); \
-  __ret_465; \
-})
-#else
-#define vmlal_laneq_s16(__p0_466, __p1_466, __p2_466, __p3_466) __extension__ ({ \
-  int32x4_t __s0_466 = __p0_466; \
-  int16x4_t __s1_466 = __p1_466; \
-  int16x8_t __s2_466 = __p2_466; \
-  int32x4_t __rev0_466;  __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 3, 2, 1, 0); \
-  int16x4_t __rev1_466;  __rev1_466 = __builtin_shufflevector(__s1_466, __s1_466, 3, 2, 1, 0); \
-  int16x8_t __rev2_466;  __rev2_466 = __builtin_shufflevector(__s2_466, __s2_466, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_466; \
-  __ret_466 = __rev0_466 + __noswap_vmull_s16(__rev1_466, __noswap_splat_laneq_s16(__rev2_466, __p3_466)); \
-  __ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 3, 2, 1, 0); \
-  __ret_466; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#else
-__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 - __rev1 * __rev2;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) {
-  float64x1_t __ret;
-  __ret = __p0 - __p1 * __p2;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u32(__p0_467, __p1_467, __p2_467, __p3_467) __extension__ ({ \
-  uint32x4_t __s0_467 = __p0_467; \
-  uint32x4_t __s1_467 = __p1_467; \
-  uint32x4_t __s2_467 = __p2_467; \
-  uint32x4_t __ret_467; \
-  __ret_467 = __s0_467 - __s1_467 * splatq_laneq_u32(__s2_467, __p3_467); \
-  __ret_467; \
-})
-#else
-#define vmlsq_laneq_u32(__p0_468, __p1_468, __p2_468, __p3_468) __extension__ ({ \
-  uint32x4_t __s0_468 = __p0_468; \
-  uint32x4_t __s1_468 = __p1_468; \
-  uint32x4_t __s2_468 = __p2_468; \
-  uint32x4_t __rev0_468;  __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 3, 2, 1, 0); \
-  uint32x4_t __rev1_468;  __rev1_468 = __builtin_shufflevector(__s1_468, __s1_468, 3, 2, 1, 0); \
-  uint32x4_t __rev2_468;  __rev2_468 = __builtin_shufflevector(__s2_468, __s2_468, 3, 2, 1, 0); \
-  uint32x4_t __ret_468; \
-  __ret_468 = __rev0_468 - __rev1_468 * __noswap_splatq_laneq_u32(__rev2_468, __p3_468); \
-  __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 3, 2, 1, 0); \
-  __ret_468; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_u16(__p0_469, __p1_469, __p2_469, __p3_469) __extension__ ({ \
-  uint16x8_t __s0_469 = __p0_469; \
-  uint16x8_t __s1_469 = __p1_469; \
-  uint16x8_t __s2_469 = __p2_469; \
-  uint16x8_t __ret_469; \
-  __ret_469 = __s0_469 - __s1_469 * splatq_laneq_u16(__s2_469, __p3_469); \
-  __ret_469; \
-})
-#else
-#define vmlsq_laneq_u16(__p0_470, __p1_470, __p2_470, __p3_470) __extension__ ({ \
-  uint16x8_t __s0_470 = __p0_470; \
-  uint16x8_t __s1_470 = __p1_470; \
-  uint16x8_t __s2_470 = __p2_470; \
-  uint16x8_t __rev0_470;  __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_470;  __rev1_470 = __builtin_shufflevector(__s1_470, __s1_470, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_470;  __rev2_470 = __builtin_shufflevector(__s2_470, __s2_470, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_470; \
-  __ret_470 = __rev0_470 - __rev1_470 * __noswap_splatq_laneq_u16(__rev2_470, __p3_470); \
-  __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_470; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_f32(__p0_471, __p1_471, __p2_471, __p3_471) __extension__ ({ \
-  float32x4_t __s0_471 = __p0_471; \
-  float32x4_t __s1_471 = __p1_471; \
-  float32x4_t __s2_471 = __p2_471; \
-  float32x4_t __ret_471; \
-  __ret_471 = __s0_471 - __s1_471 * splatq_laneq_f32(__s2_471, __p3_471); \
-  __ret_471; \
-})
-#else
-#define vmlsq_laneq_f32(__p0_472, __p1_472, __p2_472, __p3_472) __extension__ ({ \
-  float32x4_t __s0_472 = __p0_472; \
-  float32x4_t __s1_472 = __p1_472; \
-  float32x4_t __s2_472 = __p2_472; \
-  float32x4_t __rev0_472;  __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 3, 2, 1, 0); \
-  float32x4_t __rev1_472;  __rev1_472 = __builtin_shufflevector(__s1_472, __s1_472, 3, 2, 1, 0); \
-  float32x4_t __rev2_472;  __rev2_472 = __builtin_shufflevector(__s2_472, __s2_472, 3, 2, 1, 0); \
-  float32x4_t __ret_472; \
-  __ret_472 = __rev0_472 - __rev1_472 * __noswap_splatq_laneq_f32(__rev2_472, __p3_472); \
-  __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 3, 2, 1, 0); \
-  __ret_472; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s32(__p0_473, __p1_473, __p2_473, __p3_473) __extension__ ({ \
-  int32x4_t __s0_473 = __p0_473; \
-  int32x4_t __s1_473 = __p1_473; \
-  int32x4_t __s2_473 = __p2_473; \
-  int32x4_t __ret_473; \
-  __ret_473 = __s0_473 - __s1_473 * splatq_laneq_s32(__s2_473, __p3_473); \
-  __ret_473; \
-})
-#else
-#define vmlsq_laneq_s32(__p0_474, __p1_474, __p2_474, __p3_474) __extension__ ({ \
-  int32x4_t __s0_474 = __p0_474; \
-  int32x4_t __s1_474 = __p1_474; \
-  int32x4_t __s2_474 = __p2_474; \
-  int32x4_t __rev0_474;  __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 3, 2, 1, 0); \
-  int32x4_t __rev1_474;  __rev1_474 = __builtin_shufflevector(__s1_474, __s1_474, 3, 2, 1, 0); \
-  int32x4_t __rev2_474;  __rev2_474 = __builtin_shufflevector(__s2_474, __s2_474, 3, 2, 1, 0); \
-  int32x4_t __ret_474; \
-  __ret_474 = __rev0_474 - __rev1_474 * __noswap_splatq_laneq_s32(__rev2_474, __p3_474); \
-  __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 3, 2, 1, 0); \
-  __ret_474; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsq_laneq_s16(__p0_475, __p1_475, __p2_475, __p3_475) __extension__ ({ \
-  int16x8_t __s0_475 = __p0_475; \
-  int16x8_t __s1_475 = __p1_475; \
-  int16x8_t __s2_475 = __p2_475; \
-  int16x8_t __ret_475; \
-  __ret_475 = __s0_475 - __s1_475 * splatq_laneq_s16(__s2_475, __p3_475); \
-  __ret_475; \
-})
-#else
-#define vmlsq_laneq_s16(__p0_476, __p1_476, __p2_476, __p3_476) __extension__ ({ \
-  int16x8_t __s0_476 = __p0_476; \
-  int16x8_t __s1_476 = __p1_476; \
-  int16x8_t __s2_476 = __p2_476; \
-  int16x8_t __rev0_476;  __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_476;  __rev1_476 = __builtin_shufflevector(__s1_476, __s1_476, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_476;  __rev2_476 = __builtin_shufflevector(__s2_476, __s2_476, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_476; \
-  __ret_476 = __rev0_476 - __rev1_476 * __noswap_splatq_laneq_s16(__rev2_476, __p3_476); \
-  __ret_476 = __builtin_shufflevector(__ret_476, __ret_476, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_476; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u32(__p0_477, __p1_477, __p2_477, __p3_477) __extension__ ({ \
-  uint32x2_t __s0_477 = __p0_477; \
-  uint32x2_t __s1_477 = __p1_477; \
-  uint32x4_t __s2_477 = __p2_477; \
-  uint32x2_t __ret_477; \
-  __ret_477 = __s0_477 - __s1_477 * splat_laneq_u32(__s2_477, __p3_477); \
-  __ret_477; \
-})
-#else
-#define vmls_laneq_u32(__p0_478, __p1_478, __p2_478, __p3_478) __extension__ ({ \
-  uint32x2_t __s0_478 = __p0_478; \
-  uint32x2_t __s1_478 = __p1_478; \
-  uint32x4_t __s2_478 = __p2_478; \
-  uint32x2_t __rev0_478;  __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 1, 0); \
-  uint32x2_t __rev1_478;  __rev1_478 = __builtin_shufflevector(__s1_478, __s1_478, 1, 0); \
-  uint32x4_t __rev2_478;  __rev2_478 = __builtin_shufflevector(__s2_478, __s2_478, 3, 2, 1, 0); \
-  uint32x2_t __ret_478; \
-  __ret_478 = __rev0_478 - __rev1_478 * __noswap_splat_laneq_u32(__rev2_478, __p3_478); \
-  __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 1, 0); \
-  __ret_478; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_u16(__p0_479, __p1_479, __p2_479, __p3_479) __extension__ ({ \
-  uint16x4_t __s0_479 = __p0_479; \
-  uint16x4_t __s1_479 = __p1_479; \
-  uint16x8_t __s2_479 = __p2_479; \
-  uint16x4_t __ret_479; \
-  __ret_479 = __s0_479 - __s1_479 * splat_laneq_u16(__s2_479, __p3_479); \
-  __ret_479; \
-})
-#else
-#define vmls_laneq_u16(__p0_480, __p1_480, __p2_480, __p3_480) __extension__ ({ \
-  uint16x4_t __s0_480 = __p0_480; \
-  uint16x4_t __s1_480 = __p1_480; \
-  uint16x8_t __s2_480 = __p2_480; \
-  uint16x4_t __rev0_480;  __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 3, 2, 1, 0); \
-  uint16x4_t __rev1_480;  __rev1_480 = __builtin_shufflevector(__s1_480, __s1_480, 3, 2, 1, 0); \
-  uint16x8_t __rev2_480;  __rev2_480 = __builtin_shufflevector(__s2_480, __s2_480, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_480; \
-  __ret_480 = __rev0_480 - __rev1_480 * __noswap_splat_laneq_u16(__rev2_480, __p3_480); \
-  __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 3, 2, 1, 0); \
-  __ret_480; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_f32(__p0_481, __p1_481, __p2_481, __p3_481) __extension__ ({ \
-  float32x2_t __s0_481 = __p0_481; \
-  float32x2_t __s1_481 = __p1_481; \
-  float32x4_t __s2_481 = __p2_481; \
-  float32x2_t __ret_481; \
-  __ret_481 = __s0_481 - __s1_481 * splat_laneq_f32(__s2_481, __p3_481); \
-  __ret_481; \
-})
-#else
-#define vmls_laneq_f32(__p0_482, __p1_482, __p2_482, __p3_482) __extension__ ({ \
-  float32x2_t __s0_482 = __p0_482; \
-  float32x2_t __s1_482 = __p1_482; \
-  float32x4_t __s2_482 = __p2_482; \
-  float32x2_t __rev0_482;  __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 1, 0); \
-  float32x2_t __rev1_482;  __rev1_482 = __builtin_shufflevector(__s1_482, __s1_482, 1, 0); \
-  float32x4_t __rev2_482;  __rev2_482 = __builtin_shufflevector(__s2_482, __s2_482, 3, 2, 1, 0); \
-  float32x2_t __ret_482; \
-  __ret_482 = __rev0_482 - __rev1_482 * __noswap_splat_laneq_f32(__rev2_482, __p3_482); \
-  __ret_482 = __builtin_shufflevector(__ret_482, __ret_482, 1, 0); \
-  __ret_482; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s32(__p0_483, __p1_483, __p2_483, __p3_483) __extension__ ({ \
-  int32x2_t __s0_483 = __p0_483; \
-  int32x2_t __s1_483 = __p1_483; \
-  int32x4_t __s2_483 = __p2_483; \
-  int32x2_t __ret_483; \
-  __ret_483 = __s0_483 - __s1_483 * splat_laneq_s32(__s2_483, __p3_483); \
-  __ret_483; \
-})
-#else
-#define vmls_laneq_s32(__p0_484, __p1_484, __p2_484, __p3_484) __extension__ ({ \
-  int32x2_t __s0_484 = __p0_484; \
-  int32x2_t __s1_484 = __p1_484; \
-  int32x4_t __s2_484 = __p2_484; \
-  int32x2_t __rev0_484;  __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 1, 0); \
-  int32x2_t __rev1_484;  __rev1_484 = __builtin_shufflevector(__s1_484, __s1_484, 1, 0); \
-  int32x4_t __rev2_484;  __rev2_484 = __builtin_shufflevector(__s2_484, __s2_484, 3, 2, 1, 0); \
-  int32x2_t __ret_484; \
-  __ret_484 = __rev0_484 - __rev1_484 * __noswap_splat_laneq_s32(__rev2_484, __p3_484); \
-  __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 1, 0); \
-  __ret_484; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmls_laneq_s16(__p0_485, __p1_485, __p2_485, __p3_485) __extension__ ({ \
-  int16x4_t __s0_485 = __p0_485; \
-  int16x4_t __s1_485 = __p1_485; \
-  int16x8_t __s2_485 = __p2_485; \
-  int16x4_t __ret_485; \
-  __ret_485 = __s0_485 - __s1_485 * splat_laneq_s16(__s2_485, __p3_485); \
-  __ret_485; \
-})
-#else
-#define vmls_laneq_s16(__p0_486, __p1_486, __p2_486, __p3_486) __extension__ ({ \
-  int16x4_t __s0_486 = __p0_486; \
-  int16x4_t __s1_486 = __p1_486; \
-  int16x8_t __s2_486 = __p2_486; \
-  int16x4_t __rev0_486;  __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 3, 2, 1, 0); \
-  int16x4_t __rev1_486;  __rev1_486 = __builtin_shufflevector(__s1_486, __s1_486, 3, 2, 1, 0); \
-  int16x8_t __rev2_486;  __rev2_486 = __builtin_shufflevector(__s2_486, __s2_486, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_486; \
-  __ret_486 = __rev0_486 - __rev1_486 * __noswap_splat_laneq_s16(__rev2_486, __p3_486); \
-  __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 3, 2, 1, 0); \
-  __ret_486; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __ret;
-  __ret = __p0 - __p1 * (float64x2_t) {__p2, __p2};
-  return __ret;
-}
-#else
-__ai float64x2_t vmlsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 - __rev1 * (float64x2_t) {__p2, __p2};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u32(__p0_487, __p1_487, __p2_487, __p3_487) __extension__ ({ \
-  uint64x2_t __s0_487 = __p0_487; \
-  uint32x4_t __s1_487 = __p1_487; \
-  uint32x2_t __s2_487 = __p2_487; \
-  uint64x2_t __ret_487; \
-  __ret_487 = __s0_487 - vmull_u32(vget_high_u32(__s1_487), splat_lane_u32(__s2_487, __p3_487)); \
-  __ret_487; \
-})
-#else
-#define vmlsl_high_lane_u32(__p0_488, __p1_488, __p2_488, __p3_488) __extension__ ({ \
-  uint64x2_t __s0_488 = __p0_488; \
-  uint32x4_t __s1_488 = __p1_488; \
-  uint32x2_t __s2_488 = __p2_488; \
-  uint64x2_t __rev0_488;  __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 1, 0); \
-  uint32x4_t __rev1_488;  __rev1_488 = __builtin_shufflevector(__s1_488, __s1_488, 3, 2, 1, 0); \
-  uint32x2_t __rev2_488;  __rev2_488 = __builtin_shufflevector(__s2_488, __s2_488, 1, 0); \
-  uint64x2_t __ret_488; \
-  __ret_488 = __rev0_488 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_488), __noswap_splat_lane_u32(__rev2_488, __p3_488)); \
-  __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 1, 0); \
-  __ret_488; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_u16(__p0_489, __p1_489, __p2_489, __p3_489) __extension__ ({ \
-  uint32x4_t __s0_489 = __p0_489; \
-  uint16x8_t __s1_489 = __p1_489; \
-  uint16x4_t __s2_489 = __p2_489; \
-  uint32x4_t __ret_489; \
-  __ret_489 = __s0_489 - vmull_u16(vget_high_u16(__s1_489), splat_lane_u16(__s2_489, __p3_489)); \
-  __ret_489; \
-})
-#else
-#define vmlsl_high_lane_u16(__p0_490, __p1_490, __p2_490, __p3_490) __extension__ ({ \
-  uint32x4_t __s0_490 = __p0_490; \
-  uint16x8_t __s1_490 = __p1_490; \
-  uint16x4_t __s2_490 = __p2_490; \
-  uint32x4_t __rev0_490;  __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 3, 2, 1, 0); \
-  uint16x8_t __rev1_490;  __rev1_490 = __builtin_shufflevector(__s1_490, __s1_490, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev2_490;  __rev2_490 = __builtin_shufflevector(__s2_490, __s2_490, 3, 2, 1, 0); \
-  uint32x4_t __ret_490; \
-  __ret_490 = __rev0_490 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_490), __noswap_splat_lane_u16(__rev2_490, __p3_490)); \
-  __ret_490 = __builtin_shufflevector(__ret_490, __ret_490, 3, 2, 1, 0); \
-  __ret_490; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s32(__p0_491, __p1_491, __p2_491, __p3_491) __extension__ ({ \
-  int64x2_t __s0_491 = __p0_491; \
-  int32x4_t __s1_491 = __p1_491; \
-  int32x2_t __s2_491 = __p2_491; \
-  int64x2_t __ret_491; \
-  __ret_491 = __s0_491 - vmull_s32(vget_high_s32(__s1_491), splat_lane_s32(__s2_491, __p3_491)); \
-  __ret_491; \
-})
-#else
-#define vmlsl_high_lane_s32(__p0_492, __p1_492, __p2_492, __p3_492) __extension__ ({ \
-  int64x2_t __s0_492 = __p0_492; \
-  int32x4_t __s1_492 = __p1_492; \
-  int32x2_t __s2_492 = __p2_492; \
-  int64x2_t __rev0_492;  __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 1, 0); \
-  int32x4_t __rev1_492;  __rev1_492 = __builtin_shufflevector(__s1_492, __s1_492, 3, 2, 1, 0); \
-  int32x2_t __rev2_492;  __rev2_492 = __builtin_shufflevector(__s2_492, __s2_492, 1, 0); \
-  int64x2_t __ret_492; \
-  __ret_492 = __rev0_492 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_492), __noswap_splat_lane_s32(__rev2_492, __p3_492)); \
-  __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 1, 0); \
-  __ret_492; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_lane_s16(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \
-  int32x4_t __s0_493 = __p0_493; \
-  int16x8_t __s1_493 = __p1_493; \
-  int16x4_t __s2_493 = __p2_493; \
-  int32x4_t __ret_493; \
-  __ret_493 = __s0_493 - vmull_s16(vget_high_s16(__s1_493), splat_lane_s16(__s2_493, __p3_493)); \
-  __ret_493; \
-})
-#else
-#define vmlsl_high_lane_s16(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \
-  int32x4_t __s0_494 = __p0_494; \
-  int16x8_t __s1_494 = __p1_494; \
-  int16x4_t __s2_494 = __p2_494; \
-  int32x4_t __rev0_494;  __rev0_494 = __builtin_shufflevector(__s0_494, __s0_494, 3, 2, 1, 0); \
-  int16x8_t __rev1_494;  __rev1_494 = __builtin_shufflevector(__s1_494, __s1_494, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_494;  __rev2_494 = __builtin_shufflevector(__s2_494, __s2_494, 3, 2, 1, 0); \
-  int32x4_t __ret_494; \
-  __ret_494 = __rev0_494 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_494), __noswap_splat_lane_s16(__rev2_494, __p3_494)); \
-  __ret_494 = __builtin_shufflevector(__ret_494, __ret_494, 3, 2, 1, 0); \
-  __ret_494; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u32(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \
-  uint64x2_t __s0_495 = __p0_495; \
-  uint32x4_t __s1_495 = __p1_495; \
-  uint32x4_t __s2_495 = __p2_495; \
-  uint64x2_t __ret_495; \
-  __ret_495 = __s0_495 - vmull_u32(vget_high_u32(__s1_495), splat_laneq_u32(__s2_495, __p3_495)); \
-  __ret_495; \
-})
-#else
-#define vmlsl_high_laneq_u32(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \
-  uint64x2_t __s0_496 = __p0_496; \
-  uint32x4_t __s1_496 = __p1_496; \
-  uint32x4_t __s2_496 = __p2_496; \
-  uint64x2_t __rev0_496;  __rev0_496 = __builtin_shufflevector(__s0_496, __s0_496, 1, 0); \
-  uint32x4_t __rev1_496;  __rev1_496 = __builtin_shufflevector(__s1_496, __s1_496, 3, 2, 1, 0); \
-  uint32x4_t __rev2_496;  __rev2_496 = __builtin_shufflevector(__s2_496, __s2_496, 3, 2, 1, 0); \
-  uint64x2_t __ret_496; \
-  __ret_496 = __rev0_496 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_496), __noswap_splat_laneq_u32(__rev2_496, __p3_496)); \
-  __ret_496 = __builtin_shufflevector(__ret_496, __ret_496, 1, 0); \
-  __ret_496; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_u16(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \
-  uint32x4_t __s0_497 = __p0_497; \
-  uint16x8_t __s1_497 = __p1_497; \
-  uint16x8_t __s2_497 = __p2_497; \
-  uint32x4_t __ret_497; \
-  __ret_497 = __s0_497 - vmull_u16(vget_high_u16(__s1_497), splat_laneq_u16(__s2_497, __p3_497)); \
-  __ret_497; \
-})
-#else
-#define vmlsl_high_laneq_u16(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \
-  uint32x4_t __s0_498 = __p0_498; \
-  uint16x8_t __s1_498 = __p1_498; \
-  uint16x8_t __s2_498 = __p2_498; \
-  uint32x4_t __rev0_498;  __rev0_498 = __builtin_shufflevector(__s0_498, __s0_498, 3, 2, 1, 0); \
-  uint16x8_t __rev1_498;  __rev1_498 = __builtin_shufflevector(__s1_498, __s1_498, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev2_498;  __rev2_498 = __builtin_shufflevector(__s2_498, __s2_498, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_498; \
-  __ret_498 = __rev0_498 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_498), __noswap_splat_laneq_u16(__rev2_498, __p3_498)); \
-  __ret_498 = __builtin_shufflevector(__ret_498, __ret_498, 3, 2, 1, 0); \
-  __ret_498; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \
-  int64x2_t __s0_499 = __p0_499; \
-  int32x4_t __s1_499 = __p1_499; \
-  int32x4_t __s2_499 = __p2_499; \
-  int64x2_t __ret_499; \
-  __ret_499 = __s0_499 - vmull_s32(vget_high_s32(__s1_499), splat_laneq_s32(__s2_499, __p3_499)); \
-  __ret_499; \
-})
-#else
-#define vmlsl_high_laneq_s32(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \
-  int64x2_t __s0_500 = __p0_500; \
-  int32x4_t __s1_500 = __p1_500; \
-  int32x4_t __s2_500 = __p2_500; \
-  int64x2_t __rev0_500;  __rev0_500 = __builtin_shufflevector(__s0_500, __s0_500, 1, 0); \
-  int32x4_t __rev1_500;  __rev1_500 = __builtin_shufflevector(__s1_500, __s1_500, 3, 2, 1, 0); \
-  int32x4_t __rev2_500;  __rev2_500 = __builtin_shufflevector(__s2_500, __s2_500, 3, 2, 1, 0); \
-  int64x2_t __ret_500; \
-  __ret_500 = __rev0_500 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_500), __noswap_splat_laneq_s32(__rev2_500, __p3_500)); \
-  __ret_500 = __builtin_shufflevector(__ret_500, __ret_500, 1, 0); \
-  __ret_500; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_high_laneq_s16(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \
-  int32x4_t __s0_501 = __p0_501; \
-  int16x8_t __s1_501 = __p1_501; \
-  int16x8_t __s2_501 = __p2_501; \
-  int32x4_t __ret_501; \
-  __ret_501 = __s0_501 - vmull_s16(vget_high_s16(__s1_501), splat_laneq_s16(__s2_501, __p3_501)); \
-  __ret_501; \
-})
-#else
-#define vmlsl_high_laneq_s16(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \
-  int32x4_t __s0_502 = __p0_502; \
-  int16x8_t __s1_502 = __p1_502; \
-  int16x8_t __s2_502 = __p2_502; \
-  int32x4_t __rev0_502;  __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 3, 2, 1, 0); \
-  int16x8_t __rev1_502;  __rev1_502 = __builtin_shufflevector(__s1_502, __s1_502, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_502;  __rev2_502 = __builtin_shufflevector(__s2_502, __s2_502, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_502; \
-  __ret_502 = __rev0_502 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_502), __noswap_splat_laneq_s16(__rev2_502, __p3_502)); \
-  __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 3, 2, 1, 0); \
-  __ret_502; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u32(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \
-  uint64x2_t __s0_503 = __p0_503; \
-  uint32x2_t __s1_503 = __p1_503; \
-  uint32x4_t __s2_503 = __p2_503; \
-  uint64x2_t __ret_503; \
-  __ret_503 = __s0_503 - vmull_u32(__s1_503, splat_laneq_u32(__s2_503, __p3_503)); \
-  __ret_503; \
-})
-#else
-#define vmlsl_laneq_u32(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \
-  uint64x2_t __s0_504 = __p0_504; \
-  uint32x2_t __s1_504 = __p1_504; \
-  uint32x4_t __s2_504 = __p2_504; \
-  uint64x2_t __rev0_504;  __rev0_504 = __builtin_shufflevector(__s0_504, __s0_504, 1, 0); \
-  uint32x2_t __rev1_504;  __rev1_504 = __builtin_shufflevector(__s1_504, __s1_504, 1, 0); \
-  uint32x4_t __rev2_504;  __rev2_504 = __builtin_shufflevector(__s2_504, __s2_504, 3, 2, 1, 0); \
-  uint64x2_t __ret_504; \
-  __ret_504 = __rev0_504 - __noswap_vmull_u32(__rev1_504, __noswap_splat_laneq_u32(__rev2_504, __p3_504)); \
-  __ret_504 = __builtin_shufflevector(__ret_504, __ret_504, 1, 0); \
-  __ret_504; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_u16(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \
-  uint32x4_t __s0_505 = __p0_505; \
-  uint16x4_t __s1_505 = __p1_505; \
-  uint16x8_t __s2_505 = __p2_505; \
-  uint32x4_t __ret_505; \
-  __ret_505 = __s0_505 - vmull_u16(__s1_505, splat_laneq_u16(__s2_505, __p3_505)); \
-  __ret_505; \
-})
-#else
-#define vmlsl_laneq_u16(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \
-  uint32x4_t __s0_506 = __p0_506; \
-  uint16x4_t __s1_506 = __p1_506; \
-  uint16x8_t __s2_506 = __p2_506; \
-  uint32x4_t __rev0_506;  __rev0_506 = __builtin_shufflevector(__s0_506, __s0_506, 3, 2, 1, 0); \
-  uint16x4_t __rev1_506;  __rev1_506 = __builtin_shufflevector(__s1_506, __s1_506, 3, 2, 1, 0); \
-  uint16x8_t __rev2_506;  __rev2_506 = __builtin_shufflevector(__s2_506, __s2_506, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_506; \
-  __ret_506 = __rev0_506 - __noswap_vmull_u16(__rev1_506, __noswap_splat_laneq_u16(__rev2_506, __p3_506)); \
-  __ret_506 = __builtin_shufflevector(__ret_506, __ret_506, 3, 2, 1, 0); \
-  __ret_506; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s32(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \
-  int64x2_t __s0_507 = __p0_507; \
-  int32x2_t __s1_507 = __p1_507; \
-  int32x4_t __s2_507 = __p2_507; \
-  int64x2_t __ret_507; \
-  __ret_507 = __s0_507 - vmull_s32(__s1_507, splat_laneq_s32(__s2_507, __p3_507)); \
-  __ret_507; \
-})
-#else
-#define vmlsl_laneq_s32(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \
-  int64x2_t __s0_508 = __p0_508; \
-  int32x2_t __s1_508 = __p1_508; \
-  int32x4_t __s2_508 = __p2_508; \
-  int64x2_t __rev0_508;  __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 1, 0); \
-  int32x2_t __rev1_508;  __rev1_508 = __builtin_shufflevector(__s1_508, __s1_508, 1, 0); \
-  int32x4_t __rev2_508;  __rev2_508 = __builtin_shufflevector(__s2_508, __s2_508, 3, 2, 1, 0); \
-  int64x2_t __ret_508; \
-  __ret_508 = __rev0_508 - __noswap_vmull_s32(__rev1_508, __noswap_splat_laneq_s32(__rev2_508, __p3_508)); \
-  __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 1, 0); \
-  __ret_508; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_laneq_s16(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \
-  int32x4_t __s0_509 = __p0_509; \
-  int16x4_t __s1_509 = __p1_509; \
-  int16x8_t __s2_509 = __p2_509; \
-  int32x4_t __ret_509; \
-  __ret_509 = __s0_509 - vmull_s16(__s1_509, splat_laneq_s16(__s2_509, __p3_509)); \
-  __ret_509; \
-})
-#else
-#define vmlsl_laneq_s16(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \
-  int32x4_t __s0_510 = __p0_510; \
-  int16x4_t __s1_510 = __p1_510; \
-  int16x8_t __s2_510 = __p2_510; \
-  int32x4_t __rev0_510;  __rev0_510 = __builtin_shufflevector(__s0_510, __s0_510, 3, 2, 1, 0); \
-  int16x4_t __rev1_510;  __rev1_510 = __builtin_shufflevector(__s1_510, __s1_510, 3, 2, 1, 0); \
-  int16x8_t __rev2_510;  __rev2_510 = __builtin_shufflevector(__s2_510, __s2_510, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_510; \
-  __ret_510 = __rev0_510 - __noswap_vmull_s16(__rev1_510, __noswap_splat_laneq_s16(__rev2_510, __p3_510)); \
-  __ret_510 = __builtin_shufflevector(__ret_510, __ret_510, 3, 2, 1, 0); \
-  __ret_510; \
-})
-#endif
-
-__ai poly64x1_t vmov_n_p64(poly64_t __p0) {
-  poly64x1_t __ret;
-  __ret = (poly64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai poly64x2_t vmovq_n_p64(poly64_t __p0) {
-  poly64x2_t __ret;
-  __ret = (poly64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmovq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  return __ret;
-}
-#else
-__ai float64x2_t vmovq_n_f64(float64_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) {__p0, __p0};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmov_n_f64(float64_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) {__p0};
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_511) {
-  uint16x8_t __ret_511;
-  uint8x8_t __a1_511 = vget_high_u8(__p0_511);
-  __ret_511 = (uint16x8_t)(vshll_n_u8(__a1_511, 0));
-  return __ret_511;
-}
-#else
-__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_512) {
-  uint8x16_t __rev0_512;  __rev0_512 = __builtin_shufflevector(__p0_512, __p0_512, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret_512;
-  uint8x8_t __a1_512 = __noswap_vget_high_u8(__rev0_512);
-  __ret_512 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_512, 0));
-  __ret_512 = __builtin_shufflevector(__ret_512, __ret_512, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_512;
-}
-__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_513) {
-  uint16x8_t __ret_513;
-  uint8x8_t __a1_513 = __noswap_vget_high_u8(__p0_513);
-  __ret_513 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_513, 0));
-  return __ret_513;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_514) {
-  uint64x2_t __ret_514;
-  uint32x2_t __a1_514 = vget_high_u32(__p0_514);
-  __ret_514 = (uint64x2_t)(vshll_n_u32(__a1_514, 0));
-  return __ret_514;
-}
-#else
-__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_515) {
-  uint32x4_t __rev0_515;  __rev0_515 = __builtin_shufflevector(__p0_515, __p0_515, 3, 2, 1, 0);
-  uint64x2_t __ret_515;
-  uint32x2_t __a1_515 = __noswap_vget_high_u32(__rev0_515);
-  __ret_515 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_515, 0));
-  __ret_515 = __builtin_shufflevector(__ret_515, __ret_515, 1, 0);
-  return __ret_515;
-}
-__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_516) {
-  uint64x2_t __ret_516;
-  uint32x2_t __a1_516 = __noswap_vget_high_u32(__p0_516);
-  __ret_516 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_516, 0));
-  return __ret_516;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_517) {
-  uint32x4_t __ret_517;
-  uint16x4_t __a1_517 = vget_high_u16(__p0_517);
-  __ret_517 = (uint32x4_t)(vshll_n_u16(__a1_517, 0));
-  return __ret_517;
-}
-#else
-__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_518) {
-  uint16x8_t __rev0_518;  __rev0_518 = __builtin_shufflevector(__p0_518, __p0_518, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret_518;
-  uint16x4_t __a1_518 = __noswap_vget_high_u16(__rev0_518);
-  __ret_518 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_518, 0));
-  __ret_518 = __builtin_shufflevector(__ret_518, __ret_518, 3, 2, 1, 0);
-  return __ret_518;
-}
-__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_519) {
-  uint32x4_t __ret_519;
-  uint16x4_t __a1_519 = __noswap_vget_high_u16(__p0_519);
-  __ret_519 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_519, 0));
-  return __ret_519;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_520) {
-  int16x8_t __ret_520;
-  int8x8_t __a1_520 = vget_high_s8(__p0_520);
-  __ret_520 = (int16x8_t)(vshll_n_s8(__a1_520, 0));
-  return __ret_520;
-}
-#else
-__ai int16x8_t vmovl_high_s8(int8x16_t __p0_521) {
-  int8x16_t __rev0_521;  __rev0_521 = __builtin_shufflevector(__p0_521, __p0_521, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret_521;
-  int8x8_t __a1_521 = __noswap_vget_high_s8(__rev0_521);
-  __ret_521 = (int16x8_t)(__noswap_vshll_n_s8(__a1_521, 0));
-  __ret_521 = __builtin_shufflevector(__ret_521, __ret_521, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret_521;
-}
-__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_522) {
-  int16x8_t __ret_522;
-  int8x8_t __a1_522 = __noswap_vget_high_s8(__p0_522);
-  __ret_522 = (int16x8_t)(__noswap_vshll_n_s8(__a1_522, 0));
-  return __ret_522;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_523) {
-  int64x2_t __ret_523;
-  int32x2_t __a1_523 = vget_high_s32(__p0_523);
-  __ret_523 = (int64x2_t)(vshll_n_s32(__a1_523, 0));
-  return __ret_523;
-}
-#else
-__ai int64x2_t vmovl_high_s32(int32x4_t __p0_524) {
-  int32x4_t __rev0_524;  __rev0_524 = __builtin_shufflevector(__p0_524, __p0_524, 3, 2, 1, 0);
-  int64x2_t __ret_524;
-  int32x2_t __a1_524 = __noswap_vget_high_s32(__rev0_524);
-  __ret_524 = (int64x2_t)(__noswap_vshll_n_s32(__a1_524, 0));
-  __ret_524 = __builtin_shufflevector(__ret_524, __ret_524, 1, 0);
-  return __ret_524;
-}
-__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_525) {
-  int64x2_t __ret_525;
-  int32x2_t __a1_525 = __noswap_vget_high_s32(__p0_525);
-  __ret_525 = (int64x2_t)(__noswap_vshll_n_s32(__a1_525, 0));
-  return __ret_525;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_526) {
-  int32x4_t __ret_526;
-  int16x4_t __a1_526 = vget_high_s16(__p0_526);
-  __ret_526 = (int32x4_t)(vshll_n_s16(__a1_526, 0));
-  return __ret_526;
-}
-#else
-__ai int32x4_t vmovl_high_s16(int16x8_t __p0_527) {
-  int16x8_t __rev0_527;  __rev0_527 = __builtin_shufflevector(__p0_527, __p0_527, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret_527;
-  int16x4_t __a1_527 = __noswap_vget_high_s16(__rev0_527);
-  __ret_527 = (int32x4_t)(__noswap_vshll_n_s16(__a1_527, 0));
-  __ret_527 = __builtin_shufflevector(__ret_527, __ret_527, 3, 2, 1, 0);
-  return __ret_527;
-}
-__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_528) {
-  int32x4_t __ret_528;
-  int16x4_t __a1_528 = __noswap_vget_high_s16(__p0_528);
-  __ret_528 = (int32x4_t)(__noswap_vshll_n_s16(__a1_528, 0));
-  return __ret_528;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vmovn_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vmovn_u64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vmovn_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vmovn_s32(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vmovn_s64(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vmovn_s16(__p1));
-  return __ret;
-}
-#else
-__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 * __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 * __p1;
-  return __ret;
-}
-#define vmuld_lane_f64(__p0_529, __p1_529, __p2_529) __extension__ ({ \
-  float64_t __s0_529 = __p0_529; \
-  float64x1_t __s1_529 = __p1_529; \
-  float64_t __ret_529; \
-  __ret_529 = __s0_529 * vget_lane_f64(__s1_529, __p2_529); \
-  __ret_529; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmuls_lane_f32(__p0_530, __p1_530, __p2_530) __extension__ ({ \
-  float32_t __s0_530 = __p0_530; \
-  float32x2_t __s1_530 = __p1_530; \
-  float32_t __ret_530; \
-  __ret_530 = __s0_530 * vget_lane_f32(__s1_530, __p2_530); \
-  __ret_530; \
-})
-#else
-#define vmuls_lane_f32(__p0_531, __p1_531, __p2_531) __extension__ ({ \
-  float32_t __s0_531 = __p0_531; \
-  float32x2_t __s1_531 = __p1_531; \
-  float32x2_t __rev1_531;  __rev1_531 = __builtin_shufflevector(__s1_531, __s1_531, 1, 0); \
-  float32_t __ret_531; \
-  __ret_531 = __s0_531 * __noswap_vget_lane_f32(__rev1_531, __p2_531); \
-  __ret_531; \
-})
-#endif
-
-#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_lane_f64(__p0_532, __p1_532, __p2_532) __extension__ ({ \
-  float64x2_t __s0_532 = __p0_532; \
-  float64x1_t __s1_532 = __p1_532; \
-  float64x2_t __ret_532; \
-  __ret_532 = __s0_532 * splatq_lane_f64(__s1_532, __p2_532); \
-  __ret_532; \
-})
-#else
-#define vmulq_lane_f64(__p0_533, __p1_533, __p2_533) __extension__ ({ \
-  float64x2_t __s0_533 = __p0_533; \
-  float64x1_t __s1_533 = __p1_533; \
-  float64x2_t __rev0_533;  __rev0_533 = __builtin_shufflevector(__s0_533, __s0_533, 1, 0); \
-  float64x2_t __ret_533; \
-  __ret_533 = __rev0_533 * __noswap_splatq_lane_f64(__s1_533, __p2_533); \
-  __ret_533 = __builtin_shufflevector(__ret_533, __ret_533, 1, 0); \
-  __ret_533; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmuld_laneq_f64(__p0_534, __p1_534, __p2_534) __extension__ ({ \
-  float64_t __s0_534 = __p0_534; \
-  float64x2_t __s1_534 = __p1_534; \
-  float64_t __ret_534; \
-  __ret_534 = __s0_534 * vgetq_lane_f64(__s1_534, __p2_534); \
-  __ret_534; \
-})
-#else
-#define vmuld_laneq_f64(__p0_535, __p1_535, __p2_535) __extension__ ({ \
-  float64_t __s0_535 = __p0_535; \
-  float64x2_t __s1_535 = __p1_535; \
-  float64x2_t __rev1_535;  __rev1_535 = __builtin_shufflevector(__s1_535, __s1_535, 1, 0); \
-  float64_t __ret_535; \
-  __ret_535 = __s0_535 * __noswap_vgetq_lane_f64(__rev1_535, __p2_535); \
-  __ret_535; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmuls_laneq_f32(__p0_536, __p1_536, __p2_536) __extension__ ({ \
-  float32_t __s0_536 = __p0_536; \
-  float32x4_t __s1_536 = __p1_536; \
-  float32_t __ret_536; \
-  __ret_536 = __s0_536 * vgetq_lane_f32(__s1_536, __p2_536); \
-  __ret_536; \
-})
-#else
-#define vmuls_laneq_f32(__p0_537, __p1_537, __p2_537) __extension__ ({ \
-  float32_t __s0_537 = __p0_537; \
-  float32x4_t __s1_537 = __p1_537; \
-  float32x4_t __rev1_537;  __rev1_537 = __builtin_shufflevector(__s1_537, __s1_537, 3, 2, 1, 0); \
-  float32_t __ret_537; \
-  __ret_537 = __s0_537 * __noswap_vgetq_lane_f32(__rev1_537, __p2_537); \
-  __ret_537; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \
-  __ret; \
-})
-#else
-#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u32(__p0_538, __p1_538, __p2_538) __extension__ ({ \
-  uint32x4_t __s0_538 = __p0_538; \
-  uint32x4_t __s1_538 = __p1_538; \
-  uint32x4_t __ret_538; \
-  __ret_538 = __s0_538 * splatq_laneq_u32(__s1_538, __p2_538); \
-  __ret_538; \
-})
-#else
-#define vmulq_laneq_u32(__p0_539, __p1_539, __p2_539) __extension__ ({ \
-  uint32x4_t __s0_539 = __p0_539; \
-  uint32x4_t __s1_539 = __p1_539; \
-  uint32x4_t __rev0_539;  __rev0_539 = __builtin_shufflevector(__s0_539, __s0_539, 3, 2, 1, 0); \
-  uint32x4_t __rev1_539;  __rev1_539 = __builtin_shufflevector(__s1_539, __s1_539, 3, 2, 1, 0); \
-  uint32x4_t __ret_539; \
-  __ret_539 = __rev0_539 * __noswap_splatq_laneq_u32(__rev1_539, __p2_539); \
-  __ret_539 = __builtin_shufflevector(__ret_539, __ret_539, 3, 2, 1, 0); \
-  __ret_539; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_u16(__p0_540, __p1_540, __p2_540) __extension__ ({ \
-  uint16x8_t __s0_540 = __p0_540; \
-  uint16x8_t __s1_540 = __p1_540; \
-  uint16x8_t __ret_540; \
-  __ret_540 = __s0_540 * splatq_laneq_u16(__s1_540, __p2_540); \
-  __ret_540; \
-})
-#else
-#define vmulq_laneq_u16(__p0_541, __p1_541, __p2_541) __extension__ ({ \
-  uint16x8_t __s0_541 = __p0_541; \
-  uint16x8_t __s1_541 = __p1_541; \
-  uint16x8_t __rev0_541;  __rev0_541 = __builtin_shufflevector(__s0_541, __s0_541, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_541;  __rev1_541 = __builtin_shufflevector(__s1_541, __s1_541, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_541; \
-  __ret_541 = __rev0_541 * __noswap_splatq_laneq_u16(__rev1_541, __p2_541); \
-  __ret_541 = __builtin_shufflevector(__ret_541, __ret_541, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_541; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f64(__p0_542, __p1_542, __p2_542) __extension__ ({ \
-  float64x2_t __s0_542 = __p0_542; \
-  float64x2_t __s1_542 = __p1_542; \
-  float64x2_t __ret_542; \
-  __ret_542 = __s0_542 * splatq_laneq_f64(__s1_542, __p2_542); \
-  __ret_542; \
-})
-#else
-#define vmulq_laneq_f64(__p0_543, __p1_543, __p2_543) __extension__ ({ \
-  float64x2_t __s0_543 = __p0_543; \
-  float64x2_t __s1_543 = __p1_543; \
-  float64x2_t __rev0_543;  __rev0_543 = __builtin_shufflevector(__s0_543, __s0_543, 1, 0); \
-  float64x2_t __rev1_543;  __rev1_543 = __builtin_shufflevector(__s1_543, __s1_543, 1, 0); \
-  float64x2_t __ret_543; \
-  __ret_543 = __rev0_543 * __noswap_splatq_laneq_f64(__rev1_543, __p2_543); \
-  __ret_543 = __builtin_shufflevector(__ret_543, __ret_543, 1, 0); \
-  __ret_543; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_f32(__p0_544, __p1_544, __p2_544) __extension__ ({ \
-  float32x4_t __s0_544 = __p0_544; \
-  float32x4_t __s1_544 = __p1_544; \
-  float32x4_t __ret_544; \
-  __ret_544 = __s0_544 * splatq_laneq_f32(__s1_544, __p2_544); \
-  __ret_544; \
-})
-#else
-#define vmulq_laneq_f32(__p0_545, __p1_545, __p2_545) __extension__ ({ \
-  float32x4_t __s0_545 = __p0_545; \
-  float32x4_t __s1_545 = __p1_545; \
-  float32x4_t __rev0_545;  __rev0_545 = __builtin_shufflevector(__s0_545, __s0_545, 3, 2, 1, 0); \
-  float32x4_t __rev1_545;  __rev1_545 = __builtin_shufflevector(__s1_545, __s1_545, 3, 2, 1, 0); \
-  float32x4_t __ret_545; \
-  __ret_545 = __rev0_545 * __noswap_splatq_laneq_f32(__rev1_545, __p2_545); \
-  __ret_545 = __builtin_shufflevector(__ret_545, __ret_545, 3, 2, 1, 0); \
-  __ret_545; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s32(__p0_546, __p1_546, __p2_546) __extension__ ({ \
-  int32x4_t __s0_546 = __p0_546; \
-  int32x4_t __s1_546 = __p1_546; \
-  int32x4_t __ret_546; \
-  __ret_546 = __s0_546 * splatq_laneq_s32(__s1_546, __p2_546); \
-  __ret_546; \
-})
-#else
-#define vmulq_laneq_s32(__p0_547, __p1_547, __p2_547) __extension__ ({ \
-  int32x4_t __s0_547 = __p0_547; \
-  int32x4_t __s1_547 = __p1_547; \
-  int32x4_t __rev0_547;  __rev0_547 = __builtin_shufflevector(__s0_547, __s0_547, 3, 2, 1, 0); \
-  int32x4_t __rev1_547;  __rev1_547 = __builtin_shufflevector(__s1_547, __s1_547, 3, 2, 1, 0); \
-  int32x4_t __ret_547; \
-  __ret_547 = __rev0_547 * __noswap_splatq_laneq_s32(__rev1_547, __p2_547); \
-  __ret_547 = __builtin_shufflevector(__ret_547, __ret_547, 3, 2, 1, 0); \
-  __ret_547; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulq_laneq_s16(__p0_548, __p1_548, __p2_548) __extension__ ({ \
-  int16x8_t __s0_548 = __p0_548; \
-  int16x8_t __s1_548 = __p1_548; \
-  int16x8_t __ret_548; \
-  __ret_548 = __s0_548 * splatq_laneq_s16(__s1_548, __p2_548); \
-  __ret_548; \
-})
-#else
-#define vmulq_laneq_s16(__p0_549, __p1_549, __p2_549) __extension__ ({ \
-  int16x8_t __s0_549 = __p0_549; \
-  int16x8_t __s1_549 = __p1_549; \
-  int16x8_t __rev0_549;  __rev0_549 = __builtin_shufflevector(__s0_549, __s0_549, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_549;  __rev1_549 = __builtin_shufflevector(__s1_549, __s1_549, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_549; \
-  __ret_549 = __rev0_549 * __noswap_splatq_laneq_s16(__rev1_549, __p2_549); \
-  __ret_549 = __builtin_shufflevector(__ret_549, __ret_549, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_549; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u32(__p0_550, __p1_550, __p2_550) __extension__ ({ \
-  uint32x2_t __s0_550 = __p0_550; \
-  uint32x4_t __s1_550 = __p1_550; \
-  uint32x2_t __ret_550; \
-  __ret_550 = __s0_550 * splat_laneq_u32(__s1_550, __p2_550); \
-  __ret_550; \
-})
-#else
-#define vmul_laneq_u32(__p0_551, __p1_551, __p2_551) __extension__ ({ \
-  uint32x2_t __s0_551 = __p0_551; \
-  uint32x4_t __s1_551 = __p1_551; \
-  uint32x2_t __rev0_551;  __rev0_551 = __builtin_shufflevector(__s0_551, __s0_551, 1, 0); \
-  uint32x4_t __rev1_551;  __rev1_551 = __builtin_shufflevector(__s1_551, __s1_551, 3, 2, 1, 0); \
-  uint32x2_t __ret_551; \
-  __ret_551 = __rev0_551 * __noswap_splat_laneq_u32(__rev1_551, __p2_551); \
-  __ret_551 = __builtin_shufflevector(__ret_551, __ret_551, 1, 0); \
-  __ret_551; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_u16(__p0_552, __p1_552, __p2_552) __extension__ ({ \
-  uint16x4_t __s0_552 = __p0_552; \
-  uint16x8_t __s1_552 = __p1_552; \
-  uint16x4_t __ret_552; \
-  __ret_552 = __s0_552 * splat_laneq_u16(__s1_552, __p2_552); \
-  __ret_552; \
-})
-#else
-#define vmul_laneq_u16(__p0_553, __p1_553, __p2_553) __extension__ ({ \
-  uint16x4_t __s0_553 = __p0_553; \
-  uint16x8_t __s1_553 = __p1_553; \
-  uint16x4_t __rev0_553;  __rev0_553 = __builtin_shufflevector(__s0_553, __s0_553, 3, 2, 1, 0); \
-  uint16x8_t __rev1_553;  __rev1_553 = __builtin_shufflevector(__s1_553, __s1_553, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __ret_553; \
-  __ret_553 = __rev0_553 * __noswap_splat_laneq_u16(__rev1_553, __p2_553); \
-  __ret_553 = __builtin_shufflevector(__ret_553, __ret_553, 3, 2, 1, 0); \
-  __ret_553; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_f32(__p0_554, __p1_554, __p2_554) __extension__ ({ \
-  float32x2_t __s0_554 = __p0_554; \
-  float32x4_t __s1_554 = __p1_554; \
-  float32x2_t __ret_554; \
-  __ret_554 = __s0_554 * splat_laneq_f32(__s1_554, __p2_554); \
-  __ret_554; \
-})
-#else
-#define vmul_laneq_f32(__p0_555, __p1_555, __p2_555) __extension__ ({ \
-  float32x2_t __s0_555 = __p0_555; \
-  float32x4_t __s1_555 = __p1_555; \
-  float32x2_t __rev0_555;  __rev0_555 = __builtin_shufflevector(__s0_555, __s0_555, 1, 0); \
-  float32x4_t __rev1_555;  __rev1_555 = __builtin_shufflevector(__s1_555, __s1_555, 3, 2, 1, 0); \
-  float32x2_t __ret_555; \
-  __ret_555 = __rev0_555 * __noswap_splat_laneq_f32(__rev1_555, __p2_555); \
-  __ret_555 = __builtin_shufflevector(__ret_555, __ret_555, 1, 0); \
-  __ret_555; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s32(__p0_556, __p1_556, __p2_556) __extension__ ({ \
-  int32x2_t __s0_556 = __p0_556; \
-  int32x4_t __s1_556 = __p1_556; \
-  int32x2_t __ret_556; \
-  __ret_556 = __s0_556 * splat_laneq_s32(__s1_556, __p2_556); \
-  __ret_556; \
-})
-#else
-#define vmul_laneq_s32(__p0_557, __p1_557, __p2_557) __extension__ ({ \
-  int32x2_t __s0_557 = __p0_557; \
-  int32x4_t __s1_557 = __p1_557; \
-  int32x2_t __rev0_557;  __rev0_557 = __builtin_shufflevector(__s0_557, __s0_557, 1, 0); \
-  int32x4_t __rev1_557;  __rev1_557 = __builtin_shufflevector(__s1_557, __s1_557, 3, 2, 1, 0); \
-  int32x2_t __ret_557; \
-  __ret_557 = __rev0_557 * __noswap_splat_laneq_s32(__rev1_557, __p2_557); \
-  __ret_557 = __builtin_shufflevector(__ret_557, __ret_557, 1, 0); \
-  __ret_557; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmul_laneq_s16(__p0_558, __p1_558, __p2_558) __extension__ ({ \
-  int16x4_t __s0_558 = __p0_558; \
-  int16x8_t __s1_558 = __p1_558; \
-  int16x4_t __ret_558; \
-  __ret_558 = __s0_558 * splat_laneq_s16(__s1_558, __p2_558); \
-  __ret_558; \
-})
-#else
-#define vmul_laneq_s16(__p0_559, __p1_559, __p2_559) __extension__ ({ \
-  int16x4_t __s0_559 = __p0_559; \
-  int16x8_t __s1_559 = __p1_559; \
-  int16x4_t __rev0_559;  __rev0_559 = __builtin_shufflevector(__s0_559, __s0_559, 3, 2, 1, 0); \
-  int16x8_t __rev1_559;  __rev1_559 = __builtin_shufflevector(__s1_559, __s1_559, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret_559; \
-  __ret_559 = __rev0_559 * __noswap_splat_laneq_s16(__rev1_559, __p2_559); \
-  __ret_559 = __builtin_shufflevector(__ret_559, __ret_559, 3, 2, 1, 0); \
-  __ret_559; \
-})
-#endif
-
-__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 * (float64x2_t) {__p1, __p1};
-  return __ret;
-}
-#else
-__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 * (float64x2_t) {__p1, __p1};
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) {
-  poly128_t __ret;
-  __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly16x8_t __ret;
-  __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1));
-  return __ret;
-}
-#else
-__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly128_t __ret;
-  __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1)));
-  return __ret;
-}
-#else
-__ai poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly128_t __ret;
-  __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1)));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u32(__p0_560, __p1_560, __p2_560) __extension__ ({ \
-  uint32x4_t __s0_560 = __p0_560; \
-  uint32x2_t __s1_560 = __p1_560; \
-  uint64x2_t __ret_560; \
-  __ret_560 = vmull_u32(vget_high_u32(__s0_560), splat_lane_u32(__s1_560, __p2_560)); \
-  __ret_560; \
-})
-#else
-#define vmull_high_lane_u32(__p0_561, __p1_561, __p2_561) __extension__ ({ \
-  uint32x4_t __s0_561 = __p0_561; \
-  uint32x2_t __s1_561 = __p1_561; \
-  uint32x4_t __rev0_561;  __rev0_561 = __builtin_shufflevector(__s0_561, __s0_561, 3, 2, 1, 0); \
-  uint32x2_t __rev1_561;  __rev1_561 = __builtin_shufflevector(__s1_561, __s1_561, 1, 0); \
-  uint64x2_t __ret_561; \
-  __ret_561 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_561), __noswap_splat_lane_u32(__rev1_561, __p2_561)); \
-  __ret_561 = __builtin_shufflevector(__ret_561, __ret_561, 1, 0); \
-  __ret_561; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_u16(__p0_562, __p1_562, __p2_562) __extension__ ({ \
-  uint16x8_t __s0_562 = __p0_562; \
-  uint16x4_t __s1_562 = __p1_562; \
-  uint32x4_t __ret_562; \
-  __ret_562 = vmull_u16(vget_high_u16(__s0_562), splat_lane_u16(__s1_562, __p2_562)); \
-  __ret_562; \
-})
-#else
-#define vmull_high_lane_u16(__p0_563, __p1_563, __p2_563) __extension__ ({ \
-  uint16x8_t __s0_563 = __p0_563; \
-  uint16x4_t __s1_563 = __p1_563; \
-  uint16x8_t __rev0_563;  __rev0_563 = __builtin_shufflevector(__s0_563, __s0_563, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x4_t __rev1_563;  __rev1_563 = __builtin_shufflevector(__s1_563, __s1_563, 3, 2, 1, 0); \
-  uint32x4_t __ret_563; \
-  __ret_563 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_563), __noswap_splat_lane_u16(__rev1_563, __p2_563)); \
-  __ret_563 = __builtin_shufflevector(__ret_563, __ret_563, 3, 2, 1, 0); \
-  __ret_563; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s32(__p0_564, __p1_564, __p2_564) __extension__ ({ \
-  int32x4_t __s0_564 = __p0_564; \
-  int32x2_t __s1_564 = __p1_564; \
-  int64x2_t __ret_564; \
-  __ret_564 = vmull_s32(vget_high_s32(__s0_564), splat_lane_s32(__s1_564, __p2_564)); \
-  __ret_564; \
-})
-#else
-#define vmull_high_lane_s32(__p0_565, __p1_565, __p2_565) __extension__ ({ \
-  int32x4_t __s0_565 = __p0_565; \
-  int32x2_t __s1_565 = __p1_565; \
-  int32x4_t __rev0_565;  __rev0_565 = __builtin_shufflevector(__s0_565, __s0_565, 3, 2, 1, 0); \
-  int32x2_t __rev1_565;  __rev1_565 = __builtin_shufflevector(__s1_565, __s1_565, 1, 0); \
-  int64x2_t __ret_565; \
-  __ret_565 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_565), __noswap_splat_lane_s32(__rev1_565, __p2_565)); \
-  __ret_565 = __builtin_shufflevector(__ret_565, __ret_565, 1, 0); \
-  __ret_565; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_lane_s16(__p0_566, __p1_566, __p2_566) __extension__ ({ \
-  int16x8_t __s0_566 = __p0_566; \
-  int16x4_t __s1_566 = __p1_566; \
-  int32x4_t __ret_566; \
-  __ret_566 = vmull_s16(vget_high_s16(__s0_566), splat_lane_s16(__s1_566, __p2_566)); \
-  __ret_566; \
-})
-#else
-#define vmull_high_lane_s16(__p0_567, __p1_567, __p2_567) __extension__ ({ \
-  int16x8_t __s0_567 = __p0_567; \
-  int16x4_t __s1_567 = __p1_567; \
-  int16x8_t __rev0_567;  __rev0_567 = __builtin_shufflevector(__s0_567, __s0_567, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_567;  __rev1_567 = __builtin_shufflevector(__s1_567, __s1_567, 3, 2, 1, 0); \
-  int32x4_t __ret_567; \
-  __ret_567 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_567), __noswap_splat_lane_s16(__rev1_567, __p2_567)); \
-  __ret_567 = __builtin_shufflevector(__ret_567, __ret_567, 3, 2, 1, 0); \
-  __ret_567; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u32(__p0_568, __p1_568, __p2_568) __extension__ ({ \
-  uint32x4_t __s0_568 = __p0_568; \
-  uint32x4_t __s1_568 = __p1_568; \
-  uint64x2_t __ret_568; \
-  __ret_568 = vmull_u32(vget_high_u32(__s0_568), splat_laneq_u32(__s1_568, __p2_568)); \
-  __ret_568; \
-})
-#else
-#define vmull_high_laneq_u32(__p0_569, __p1_569, __p2_569) __extension__ ({ \
-  uint32x4_t __s0_569 = __p0_569; \
-  uint32x4_t __s1_569 = __p1_569; \
-  uint32x4_t __rev0_569;  __rev0_569 = __builtin_shufflevector(__s0_569, __s0_569, 3, 2, 1, 0); \
-  uint32x4_t __rev1_569;  __rev1_569 = __builtin_shufflevector(__s1_569, __s1_569, 3, 2, 1, 0); \
-  uint64x2_t __ret_569; \
-  __ret_569 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_569), __noswap_splat_laneq_u32(__rev1_569, __p2_569)); \
-  __ret_569 = __builtin_shufflevector(__ret_569, __ret_569, 1, 0); \
-  __ret_569; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_u16(__p0_570, __p1_570, __p2_570) __extension__ ({ \
-  uint16x8_t __s0_570 = __p0_570; \
-  uint16x8_t __s1_570 = __p1_570; \
-  uint32x4_t __ret_570; \
-  __ret_570 = vmull_u16(vget_high_u16(__s0_570), splat_laneq_u16(__s1_570, __p2_570)); \
-  __ret_570; \
-})
-#else
-#define vmull_high_laneq_u16(__p0_571, __p1_571, __p2_571) __extension__ ({ \
-  uint16x8_t __s0_571 = __p0_571; \
-  uint16x8_t __s1_571 = __p1_571; \
-  uint16x8_t __rev0_571;  __rev0_571 = __builtin_shufflevector(__s0_571, __s0_571, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_571;  __rev1_571 = __builtin_shufflevector(__s1_571, __s1_571, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_571; \
-  __ret_571 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_571), __noswap_splat_laneq_u16(__rev1_571, __p2_571)); \
-  __ret_571 = __builtin_shufflevector(__ret_571, __ret_571, 3, 2, 1, 0); \
-  __ret_571; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s32(__p0_572, __p1_572, __p2_572) __extension__ ({ \
-  int32x4_t __s0_572 = __p0_572; \
-  int32x4_t __s1_572 = __p1_572; \
-  int64x2_t __ret_572; \
-  __ret_572 = vmull_s32(vget_high_s32(__s0_572), splat_laneq_s32(__s1_572, __p2_572)); \
-  __ret_572; \
-})
-#else
-#define vmull_high_laneq_s32(__p0_573, __p1_573, __p2_573) __extension__ ({ \
-  int32x4_t __s0_573 = __p0_573; \
-  int32x4_t __s1_573 = __p1_573; \
-  int32x4_t __rev0_573;  __rev0_573 = __builtin_shufflevector(__s0_573, __s0_573, 3, 2, 1, 0); \
-  int32x4_t __rev1_573;  __rev1_573 = __builtin_shufflevector(__s1_573, __s1_573, 3, 2, 1, 0); \
-  int64x2_t __ret_573; \
-  __ret_573 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_573), __noswap_splat_laneq_s32(__rev1_573, __p2_573)); \
-  __ret_573 = __builtin_shufflevector(__ret_573, __ret_573, 1, 0); \
-  __ret_573; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_high_laneq_s16(__p0_574, __p1_574, __p2_574) __extension__ ({ \
-  int16x8_t __s0_574 = __p0_574; \
-  int16x8_t __s1_574 = __p1_574; \
-  int32x4_t __ret_574; \
-  __ret_574 = vmull_s16(vget_high_s16(__s0_574), splat_laneq_s16(__s1_574, __p2_574)); \
-  __ret_574; \
-})
-#else
-#define vmull_high_laneq_s16(__p0_575, __p1_575, __p2_575) __extension__ ({ \
-  int16x8_t __s0_575 = __p0_575; \
-  int16x8_t __s1_575 = __p1_575; \
-  int16x8_t __rev0_575;  __rev0_575 = __builtin_shufflevector(__s0_575, __s0_575, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_575;  __rev1_575 = __builtin_shufflevector(__s1_575, __s1_575, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_575; \
-  __ret_575 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_575), __noswap_splat_laneq_s16(__rev1_575, __p2_575)); \
-  __ret_575 = __builtin_shufflevector(__ret_575, __ret_575, 3, 2, 1, 0); \
-  __ret_575; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmull_n_u32(vget_high_u32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmull_n_u16(vget_high_u16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vmull_n_s32(vget_high_s32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vmull_n_s16(vget_high_s16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u32(__p0_576, __p1_576, __p2_576) __extension__ ({ \
-  uint32x2_t __s0_576 = __p0_576; \
-  uint32x4_t __s1_576 = __p1_576; \
-  uint64x2_t __ret_576; \
-  __ret_576 = vmull_u32(__s0_576, splat_laneq_u32(__s1_576, __p2_576)); \
-  __ret_576; \
-})
-#else
-#define vmull_laneq_u32(__p0_577, __p1_577, __p2_577) __extension__ ({ \
-  uint32x2_t __s0_577 = __p0_577; \
-  uint32x4_t __s1_577 = __p1_577; \
-  uint32x2_t __rev0_577;  __rev0_577 = __builtin_shufflevector(__s0_577, __s0_577, 1, 0); \
-  uint32x4_t __rev1_577;  __rev1_577 = __builtin_shufflevector(__s1_577, __s1_577, 3, 2, 1, 0); \
-  uint64x2_t __ret_577; \
-  __ret_577 = __noswap_vmull_u32(__rev0_577, __noswap_splat_laneq_u32(__rev1_577, __p2_577)); \
-  __ret_577 = __builtin_shufflevector(__ret_577, __ret_577, 1, 0); \
-  __ret_577; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_u16(__p0_578, __p1_578, __p2_578) __extension__ ({ \
-  uint16x4_t __s0_578 = __p0_578; \
-  uint16x8_t __s1_578 = __p1_578; \
-  uint32x4_t __ret_578; \
-  __ret_578 = vmull_u16(__s0_578, splat_laneq_u16(__s1_578, __p2_578)); \
-  __ret_578; \
-})
-#else
-#define vmull_laneq_u16(__p0_579, __p1_579, __p2_579) __extension__ ({ \
-  uint16x4_t __s0_579 = __p0_579; \
-  uint16x8_t __s1_579 = __p1_579; \
-  uint16x4_t __rev0_579;  __rev0_579 = __builtin_shufflevector(__s0_579, __s0_579, 3, 2, 1, 0); \
-  uint16x8_t __rev1_579;  __rev1_579 = __builtin_shufflevector(__s1_579, __s1_579, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_579; \
-  __ret_579 = __noswap_vmull_u16(__rev0_579, __noswap_splat_laneq_u16(__rev1_579, __p2_579)); \
-  __ret_579 = __builtin_shufflevector(__ret_579, __ret_579, 3, 2, 1, 0); \
-  __ret_579; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s32(__p0_580, __p1_580, __p2_580) __extension__ ({ \
-  int32x2_t __s0_580 = __p0_580; \
-  int32x4_t __s1_580 = __p1_580; \
-  int64x2_t __ret_580; \
-  __ret_580 = vmull_s32(__s0_580, splat_laneq_s32(__s1_580, __p2_580)); \
-  __ret_580; \
-})
-#else
-#define vmull_laneq_s32(__p0_581, __p1_581, __p2_581) __extension__ ({ \
-  int32x2_t __s0_581 = __p0_581; \
-  int32x4_t __s1_581 = __p1_581; \
-  int32x2_t __rev0_581;  __rev0_581 = __builtin_shufflevector(__s0_581, __s0_581, 1, 0); \
-  int32x4_t __rev1_581;  __rev1_581 = __builtin_shufflevector(__s1_581, __s1_581, 3, 2, 1, 0); \
-  int64x2_t __ret_581; \
-  __ret_581 = __noswap_vmull_s32(__rev0_581, __noswap_splat_laneq_s32(__rev1_581, __p2_581)); \
-  __ret_581 = __builtin_shufflevector(__ret_581, __ret_581, 1, 0); \
-  __ret_581; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmull_laneq_s16(__p0_582, __p1_582, __p2_582) __extension__ ({ \
-  int16x4_t __s0_582 = __p0_582; \
-  int16x8_t __s1_582 = __p1_582; \
-  int32x4_t __ret_582; \
-  __ret_582 = vmull_s16(__s0_582, splat_laneq_s16(__s1_582, __p2_582)); \
-  __ret_582; \
-})
-#else
-#define vmull_laneq_s16(__p0_583, __p1_583, __p2_583) __extension__ ({ \
-  int16x4_t __s0_583 = __p0_583; \
-  int16x8_t __s1_583 = __p1_583; \
-  int16x4_t __rev0_583;  __rev0_583 = __builtin_shufflevector(__s0_583, __s0_583, 3, 2, 1, 0); \
-  int16x8_t __rev1_583;  __rev1_583 = __builtin_shufflevector(__s1_583, __s1_583, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_583; \
-  __ret_583 = __noswap_vmull_s16(__rev0_583, __noswap_splat_laneq_s16(__rev1_583, __p2_583)); \
-  __ret_583 = __builtin_shufflevector(__ret_583, __ret_583, 3, 2, 1, 0); \
-  __ret_583; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#endif
-
-__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1);
-  return __ret;
-}
-#define vmulxd_lane_f64(__p0_584, __p1_584, __p2_584) __extension__ ({ \
-  float64_t __s0_584 = __p0_584; \
-  float64x1_t __s1_584 = __p1_584; \
-  float64_t __ret_584; \
-  __ret_584 = vmulxd_f64(__s0_584, vget_lane_f64(__s1_584, __p2_584)); \
-  __ret_584; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_lane_f32(__p0_585, __p1_585, __p2_585) __extension__ ({ \
-  float32_t __s0_585 = __p0_585; \
-  float32x2_t __s1_585 = __p1_585; \
-  float32_t __ret_585; \
-  __ret_585 = vmulxs_f32(__s0_585, vget_lane_f32(__s1_585, __p2_585)); \
-  __ret_585; \
-})
-#else
-#define vmulxs_lane_f32(__p0_586, __p1_586, __p2_586) __extension__ ({ \
-  float32_t __s0_586 = __p0_586; \
-  float32x2_t __s1_586 = __p1_586; \
-  float32x2_t __rev1_586;  __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 1, 0); \
-  float32_t __ret_586; \
-  __ret_586 = vmulxs_f32(__s0_586, __noswap_vget_lane_f32(__rev1_586, __p2_586)); \
-  __ret_586; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f64(__p0_587, __p1_587, __p2_587) __extension__ ({ \
-  float64x2_t __s0_587 = __p0_587; \
-  float64x1_t __s1_587 = __p1_587; \
-  float64x2_t __ret_587; \
-  __ret_587 = vmulxq_f64(__s0_587, splatq_lane_f64(__s1_587, __p2_587)); \
-  __ret_587; \
-})
-#else
-#define vmulxq_lane_f64(__p0_588, __p1_588, __p2_588) __extension__ ({ \
-  float64x2_t __s0_588 = __p0_588; \
-  float64x1_t __s1_588 = __p1_588; \
-  float64x2_t __rev0_588;  __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 1, 0); \
-  float64x2_t __ret_588; \
-  __ret_588 = __noswap_vmulxq_f64(__rev0_588, __noswap_splatq_lane_f64(__s1_588, __p2_588)); \
-  __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 1, 0); \
-  __ret_588; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_lane_f32(__p0_589, __p1_589, __p2_589) __extension__ ({ \
-  float32x4_t __s0_589 = __p0_589; \
-  float32x2_t __s1_589 = __p1_589; \
-  float32x4_t __ret_589; \
-  __ret_589 = vmulxq_f32(__s0_589, splatq_lane_f32(__s1_589, __p2_589)); \
-  __ret_589; \
-})
-#else
-#define vmulxq_lane_f32(__p0_590, __p1_590, __p2_590) __extension__ ({ \
-  float32x4_t __s0_590 = __p0_590; \
-  float32x2_t __s1_590 = __p1_590; \
-  float32x4_t __rev0_590;  __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 3, 2, 1, 0); \
-  float32x2_t __rev1_590;  __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 1, 0); \
-  float32x4_t __ret_590; \
-  __ret_590 = __noswap_vmulxq_f32(__rev0_590, __noswap_splatq_lane_f32(__rev1_590, __p2_590)); \
-  __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 3, 2, 1, 0); \
-  __ret_590; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_lane_f32(__p0_591, __p1_591, __p2_591) __extension__ ({ \
-  float32x2_t __s0_591 = __p0_591; \
-  float32x2_t __s1_591 = __p1_591; \
-  float32x2_t __ret_591; \
-  __ret_591 = vmulx_f32(__s0_591, splat_lane_f32(__s1_591, __p2_591)); \
-  __ret_591; \
-})
-#else
-#define vmulx_lane_f32(__p0_592, __p1_592, __p2_592) __extension__ ({ \
-  float32x2_t __s0_592 = __p0_592; \
-  float32x2_t __s1_592 = __p1_592; \
-  float32x2_t __rev0_592;  __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \
-  float32x2_t __rev1_592;  __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 1, 0); \
-  float32x2_t __ret_592; \
-  __ret_592 = __noswap_vmulx_f32(__rev0_592, __noswap_splat_lane_f32(__rev1_592, __p2_592)); \
-  __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \
-  __ret_592; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxd_laneq_f64(__p0_593, __p1_593, __p2_593) __extension__ ({ \
-  float64_t __s0_593 = __p0_593; \
-  float64x2_t __s1_593 = __p1_593; \
-  float64_t __ret_593; \
-  __ret_593 = vmulxd_f64(__s0_593, vgetq_lane_f64(__s1_593, __p2_593)); \
-  __ret_593; \
-})
-#else
-#define vmulxd_laneq_f64(__p0_594, __p1_594, __p2_594) __extension__ ({ \
-  float64_t __s0_594 = __p0_594; \
-  float64x2_t __s1_594 = __p1_594; \
-  float64x2_t __rev1_594;  __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 1, 0); \
-  float64_t __ret_594; \
-  __ret_594 = vmulxd_f64(__s0_594, __noswap_vgetq_lane_f64(__rev1_594, __p2_594)); \
-  __ret_594; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxs_laneq_f32(__p0_595, __p1_595, __p2_595) __extension__ ({ \
-  float32_t __s0_595 = __p0_595; \
-  float32x4_t __s1_595 = __p1_595; \
-  float32_t __ret_595; \
-  __ret_595 = vmulxs_f32(__s0_595, vgetq_lane_f32(__s1_595, __p2_595)); \
-  __ret_595; \
-})
-#else
-#define vmulxs_laneq_f32(__p0_596, __p1_596, __p2_596) __extension__ ({ \
-  float32_t __s0_596 = __p0_596; \
-  float32x4_t __s1_596 = __p1_596; \
-  float32x4_t __rev1_596;  __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 3, 2, 1, 0); \
-  float32_t __ret_596; \
-  __ret_596 = vmulxs_f32(__s0_596, __noswap_vgetq_lane_f32(__rev1_596, __p2_596)); \
-  __ret_596; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f64(__p0_597, __p1_597, __p2_597) __extension__ ({ \
-  float64x2_t __s0_597 = __p0_597; \
-  float64x2_t __s1_597 = __p1_597; \
-  float64x2_t __ret_597; \
-  __ret_597 = vmulxq_f64(__s0_597, splatq_laneq_f64(__s1_597, __p2_597)); \
-  __ret_597; \
-})
-#else
-#define vmulxq_laneq_f64(__p0_598, __p1_598, __p2_598) __extension__ ({ \
-  float64x2_t __s0_598 = __p0_598; \
-  float64x2_t __s1_598 = __p1_598; \
-  float64x2_t __rev0_598;  __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 1, 0); \
-  float64x2_t __rev1_598;  __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 1, 0); \
-  float64x2_t __ret_598; \
-  __ret_598 = __noswap_vmulxq_f64(__rev0_598, __noswap_splatq_laneq_f64(__rev1_598, __p2_598)); \
-  __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 1, 0); \
-  __ret_598; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulxq_laneq_f32(__p0_599, __p1_599, __p2_599) __extension__ ({ \
-  float32x4_t __s0_599 = __p0_599; \
-  float32x4_t __s1_599 = __p1_599; \
-  float32x4_t __ret_599; \
-  __ret_599 = vmulxq_f32(__s0_599, splatq_laneq_f32(__s1_599, __p2_599)); \
-  __ret_599; \
-})
-#else
-#define vmulxq_laneq_f32(__p0_600, __p1_600, __p2_600) __extension__ ({ \
-  float32x4_t __s0_600 = __p0_600; \
-  float32x4_t __s1_600 = __p1_600; \
-  float32x4_t __rev0_600;  __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 3, 2, 1, 0); \
-  float32x4_t __rev1_600;  __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 3, 2, 1, 0); \
-  float32x4_t __ret_600; \
-  __ret_600 = __noswap_vmulxq_f32(__rev0_600, __noswap_splatq_laneq_f32(__rev1_600, __p2_600)); \
-  __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 3, 2, 1, 0); \
-  __ret_600; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f32(__p0_601, __p1_601, __p2_601) __extension__ ({ \
-  float32x2_t __s0_601 = __p0_601; \
-  float32x4_t __s1_601 = __p1_601; \
-  float32x2_t __ret_601; \
-  __ret_601 = vmulx_f32(__s0_601, splat_laneq_f32(__s1_601, __p2_601)); \
-  __ret_601; \
-})
-#else
-#define vmulx_laneq_f32(__p0_602, __p1_602, __p2_602) __extension__ ({ \
-  float32x2_t __s0_602 = __p0_602; \
-  float32x4_t __s1_602 = __p1_602; \
-  float32x2_t __rev0_602;  __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 1, 0); \
-  float32x4_t __rev1_602;  __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 3, 2, 1, 0); \
-  float32x2_t __ret_602; \
-  __ret_602 = __noswap_vmulx_f32(__rev0_602, __noswap_splat_laneq_f32(__rev1_602, __p2_602)); \
-  __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 1, 0); \
-  __ret_602; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vnegq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai float64x2_t vnegq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vnegq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-#else
-__ai int64x2_t vnegq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = -__rev0;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vneg_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-__ai int64x1_t vneg_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = -__p0;
-  return __ret;
-}
-__ai int64_t vnegd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vnegd_s64(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0);
-  return __ret;
-}
-#else
-__ai uint64_t vpaddd_u64(uint64x2_t __p0) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpaddd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpaddd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64_t vpaddd_s64(int64x2_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0);
-  return __ret;
-}
-#else
-__ai int64_t vpaddd_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpadds_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpadds_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpmaxqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmaxs_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmaxs_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmaxnms_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpminqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpminqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpmins_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpmins_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0);
-  return __ret;
-}
-#else
-__ai float64_t vpminnmqd_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32_t vpminnms_f32(float32x2_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0);
-  return __ret;
-}
-#else
-__ai float32_t vpminnms_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqabsq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqabs_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int8_t vqabsb_s8(int8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0);
-  return __ret;
-}
-__ai int32_t vqabss_s32(int32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqabss_s32(__p0);
-  return __ret;
-}
-__ai int64_t vqabsd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0);
-  return __ret;
-}
-__ai int16_t vqabsh_s16(int16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0);
-  return __ret;
-}
-__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2);
-  return __ret;
-}
-__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s32(__p0_603, __p1_603, __p2_603, __p3_603) __extension__ ({ \
-  int64x2_t __s0_603 = __p0_603; \
-  int32x4_t __s1_603 = __p1_603; \
-  int32x2_t __s2_603 = __p2_603; \
-  int64x2_t __ret_603; \
-  __ret_603 = vqdmlal_s32(__s0_603, vget_high_s32(__s1_603), splat_lane_s32(__s2_603, __p3_603)); \
-  __ret_603; \
-})
-#else
-#define vqdmlal_high_lane_s32(__p0_604, __p1_604, __p2_604, __p3_604) __extension__ ({ \
-  int64x2_t __s0_604 = __p0_604; \
-  int32x4_t __s1_604 = __p1_604; \
-  int32x2_t __s2_604 = __p2_604; \
-  int64x2_t __rev0_604;  __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 1, 0); \
-  int32x4_t __rev1_604;  __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 3, 2, 1, 0); \
-  int32x2_t __rev2_604;  __rev2_604 = __builtin_shufflevector(__s2_604, __s2_604, 1, 0); \
-  int64x2_t __ret_604; \
-  __ret_604 = __noswap_vqdmlal_s32(__rev0_604, __noswap_vget_high_s32(__rev1_604), __noswap_splat_lane_s32(__rev2_604, __p3_604)); \
-  __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 1, 0); \
-  __ret_604; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_lane_s16(__p0_605, __p1_605, __p2_605, __p3_605) __extension__ ({ \
-  int32x4_t __s0_605 = __p0_605; \
-  int16x8_t __s1_605 = __p1_605; \
-  int16x4_t __s2_605 = __p2_605; \
-  int32x4_t __ret_605; \
-  __ret_605 = vqdmlal_s16(__s0_605, vget_high_s16(__s1_605), splat_lane_s16(__s2_605, __p3_605)); \
-  __ret_605; \
-})
-#else
-#define vqdmlal_high_lane_s16(__p0_606, __p1_606, __p2_606, __p3_606) __extension__ ({ \
-  int32x4_t __s0_606 = __p0_606; \
-  int16x8_t __s1_606 = __p1_606; \
-  int16x4_t __s2_606 = __p2_606; \
-  int32x4_t __rev0_606;  __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 3, 2, 1, 0); \
-  int16x8_t __rev1_606;  __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_606;  __rev2_606 = __builtin_shufflevector(__s2_606, __s2_606, 3, 2, 1, 0); \
-  int32x4_t __ret_606; \
-  __ret_606 = __noswap_vqdmlal_s16(__rev0_606, __noswap_vget_high_s16(__rev1_606), __noswap_splat_lane_s16(__rev2_606, __p3_606)); \
-  __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 3, 2, 1, 0); \
-  __ret_606; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s32(__p0_607, __p1_607, __p2_607, __p3_607) __extension__ ({ \
-  int64x2_t __s0_607 = __p0_607; \
-  int32x4_t __s1_607 = __p1_607; \
-  int32x4_t __s2_607 = __p2_607; \
-  int64x2_t __ret_607; \
-  __ret_607 = vqdmlal_s32(__s0_607, vget_high_s32(__s1_607), splat_laneq_s32(__s2_607, __p3_607)); \
-  __ret_607; \
-})
-#else
-#define vqdmlal_high_laneq_s32(__p0_608, __p1_608, __p2_608, __p3_608) __extension__ ({ \
-  int64x2_t __s0_608 = __p0_608; \
-  int32x4_t __s1_608 = __p1_608; \
-  int32x4_t __s2_608 = __p2_608; \
-  int64x2_t __rev0_608;  __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 1, 0); \
-  int32x4_t __rev1_608;  __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, 3, 2, 1, 0); \
-  int32x4_t __rev2_608;  __rev2_608 = __builtin_shufflevector(__s2_608, __s2_608, 3, 2, 1, 0); \
-  int64x2_t __ret_608; \
-  __ret_608 = __noswap_vqdmlal_s32(__rev0_608, __noswap_vget_high_s32(__rev1_608), __noswap_splat_laneq_s32(__rev2_608, __p3_608)); \
-  __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 1, 0); \
-  __ret_608; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_high_laneq_s16(__p0_609, __p1_609, __p2_609, __p3_609) __extension__ ({ \
-  int32x4_t __s0_609 = __p0_609; \
-  int16x8_t __s1_609 = __p1_609; \
-  int16x8_t __s2_609 = __p2_609; \
-  int32x4_t __ret_609; \
-  __ret_609 = vqdmlal_s16(__s0_609, vget_high_s16(__s1_609), splat_laneq_s16(__s2_609, __p3_609)); \
-  __ret_609; \
-})
-#else
-#define vqdmlal_high_laneq_s16(__p0_610, __p1_610, __p2_610, __p3_610) __extension__ ({ \
-  int32x4_t __s0_610 = __p0_610; \
-  int16x8_t __s1_610 = __p1_610; \
-  int16x8_t __s2_610 = __p2_610; \
-  int32x4_t __rev0_610;  __rev0_610 = __builtin_shufflevector(__s0_610, __s0_610, 3, 2, 1, 0); \
-  int16x8_t __rev1_610;  __rev1_610 = __builtin_shufflevector(__s1_610, __s1_610, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_610;  __rev2_610 = __builtin_shufflevector(__s2_610, __s2_610, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_610; \
-  __ret_610 = __noswap_vqdmlal_s16(__rev0_610, __noswap_vget_high_s16(__rev1_610), __noswap_splat_laneq_s16(__rev2_610, __p3_610)); \
-  __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0); \
-  __ret_610; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s32(__p0_611, __p1_611, __p2_611, __p3_611) __extension__ ({ \
-  int64x2_t __s0_611 = __p0_611; \
-  int32x2_t __s1_611 = __p1_611; \
-  int32x4_t __s2_611 = __p2_611; \
-  int64x2_t __ret_611; \
-  __ret_611 = vqdmlal_s32(__s0_611, __s1_611, splat_laneq_s32(__s2_611, __p3_611)); \
-  __ret_611; \
-})
-#else
-#define vqdmlal_laneq_s32(__p0_612, __p1_612, __p2_612, __p3_612) __extension__ ({ \
-  int64x2_t __s0_612 = __p0_612; \
-  int32x2_t __s1_612 = __p1_612; \
-  int32x4_t __s2_612 = __p2_612; \
-  int64x2_t __rev0_612;  __rev0_612 = __builtin_shufflevector(__s0_612, __s0_612, 1, 0); \
-  int32x2_t __rev1_612;  __rev1_612 = __builtin_shufflevector(__s1_612, __s1_612, 1, 0); \
-  int32x4_t __rev2_612;  __rev2_612 = __builtin_shufflevector(__s2_612, __s2_612, 3, 2, 1, 0); \
-  int64x2_t __ret_612; \
-  __ret_612 = __noswap_vqdmlal_s32(__rev0_612, __rev1_612, __noswap_splat_laneq_s32(__rev2_612, __p3_612)); \
-  __ret_612 = __builtin_shufflevector(__ret_612, __ret_612, 1, 0); \
-  __ret_612; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlal_laneq_s16(__p0_613, __p1_613, __p2_613, __p3_613) __extension__ ({ \
-  int32x4_t __s0_613 = __p0_613; \
-  int16x4_t __s1_613 = __p1_613; \
-  int16x8_t __s2_613 = __p2_613; \
-  int32x4_t __ret_613; \
-  __ret_613 = vqdmlal_s16(__s0_613, __s1_613, splat_laneq_s16(__s2_613, __p3_613)); \
-  __ret_613; \
-})
-#else
-#define vqdmlal_laneq_s16(__p0_614, __p1_614, __p2_614, __p3_614) __extension__ ({ \
-  int32x4_t __s0_614 = __p0_614; \
-  int16x4_t __s1_614 = __p1_614; \
-  int16x8_t __s2_614 = __p2_614; \
-  int32x4_t __rev0_614;  __rev0_614 = __builtin_shufflevector(__s0_614, __s0_614, 3, 2, 1, 0); \
-  int16x4_t __rev1_614;  __rev1_614 = __builtin_shufflevector(__s1_614, __s1_614, 3, 2, 1, 0); \
-  int16x8_t __rev2_614;  __rev2_614 = __builtin_shufflevector(__s2_614, __s2_614, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_614; \
-  __ret_614 = __noswap_vqdmlal_s16(__rev0_614, __rev1_614, __noswap_splat_laneq_s16(__rev2_614, __p3_614)); \
-  __ret_614 = __builtin_shufflevector(__ret_614, __ret_614, 3, 2, 1, 0); \
-  __ret_614; \
-})
-#endif
-
-__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2);
-  return __ret;
-}
-__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s32(__p0_615, __p1_615, __p2_615, __p3_615) __extension__ ({ \
-  int64x2_t __s0_615 = __p0_615; \
-  int32x4_t __s1_615 = __p1_615; \
-  int32x2_t __s2_615 = __p2_615; \
-  int64x2_t __ret_615; \
-  __ret_615 = vqdmlsl_s32(__s0_615, vget_high_s32(__s1_615), splat_lane_s32(__s2_615, __p3_615)); \
-  __ret_615; \
-})
-#else
-#define vqdmlsl_high_lane_s32(__p0_616, __p1_616, __p2_616, __p3_616) __extension__ ({ \
-  int64x2_t __s0_616 = __p0_616; \
-  int32x4_t __s1_616 = __p1_616; \
-  int32x2_t __s2_616 = __p2_616; \
-  int64x2_t __rev0_616;  __rev0_616 = __builtin_shufflevector(__s0_616, __s0_616, 1, 0); \
-  int32x4_t __rev1_616;  __rev1_616 = __builtin_shufflevector(__s1_616, __s1_616, 3, 2, 1, 0); \
-  int32x2_t __rev2_616;  __rev2_616 = __builtin_shufflevector(__s2_616, __s2_616, 1, 0); \
-  int64x2_t __ret_616; \
-  __ret_616 = __noswap_vqdmlsl_s32(__rev0_616, __noswap_vget_high_s32(__rev1_616), __noswap_splat_lane_s32(__rev2_616, __p3_616)); \
-  __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0); \
-  __ret_616; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_lane_s16(__p0_617, __p1_617, __p2_617, __p3_617) __extension__ ({ \
-  int32x4_t __s0_617 = __p0_617; \
-  int16x8_t __s1_617 = __p1_617; \
-  int16x4_t __s2_617 = __p2_617; \
-  int32x4_t __ret_617; \
-  __ret_617 = vqdmlsl_s16(__s0_617, vget_high_s16(__s1_617), splat_lane_s16(__s2_617, __p3_617)); \
-  __ret_617; \
-})
-#else
-#define vqdmlsl_high_lane_s16(__p0_618, __p1_618, __p2_618, __p3_618) __extension__ ({ \
-  int32x4_t __s0_618 = __p0_618; \
-  int16x8_t __s1_618 = __p1_618; \
-  int16x4_t __s2_618 = __p2_618; \
-  int32x4_t __rev0_618;  __rev0_618 = __builtin_shufflevector(__s0_618, __s0_618, 3, 2, 1, 0); \
-  int16x8_t __rev1_618;  __rev1_618 = __builtin_shufflevector(__s1_618, __s1_618, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev2_618;  __rev2_618 = __builtin_shufflevector(__s2_618, __s2_618, 3, 2, 1, 0); \
-  int32x4_t __ret_618; \
-  __ret_618 = __noswap_vqdmlsl_s16(__rev0_618, __noswap_vget_high_s16(__rev1_618), __noswap_splat_lane_s16(__rev2_618, __p3_618)); \
-  __ret_618 = __builtin_shufflevector(__ret_618, __ret_618, 3, 2, 1, 0); \
-  __ret_618; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s32(__p0_619, __p1_619, __p2_619, __p3_619) __extension__ ({ \
-  int64x2_t __s0_619 = __p0_619; \
-  int32x4_t __s1_619 = __p1_619; \
-  int32x4_t __s2_619 = __p2_619; \
-  int64x2_t __ret_619; \
-  __ret_619 = vqdmlsl_s32(__s0_619, vget_high_s32(__s1_619), splat_laneq_s32(__s2_619, __p3_619)); \
-  __ret_619; \
-})
-#else
-#define vqdmlsl_high_laneq_s32(__p0_620, __p1_620, __p2_620, __p3_620) __extension__ ({ \
-  int64x2_t __s0_620 = __p0_620; \
-  int32x4_t __s1_620 = __p1_620; \
-  int32x4_t __s2_620 = __p2_620; \
-  int64x2_t __rev0_620;  __rev0_620 = __builtin_shufflevector(__s0_620, __s0_620, 1, 0); \
-  int32x4_t __rev1_620;  __rev1_620 = __builtin_shufflevector(__s1_620, __s1_620, 3, 2, 1, 0); \
-  int32x4_t __rev2_620;  __rev2_620 = __builtin_shufflevector(__s2_620, __s2_620, 3, 2, 1, 0); \
-  int64x2_t __ret_620; \
-  __ret_620 = __noswap_vqdmlsl_s32(__rev0_620, __noswap_vget_high_s32(__rev1_620), __noswap_splat_laneq_s32(__rev2_620, __p3_620)); \
-  __ret_620 = __builtin_shufflevector(__ret_620, __ret_620, 1, 0); \
-  __ret_620; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_high_laneq_s16(__p0_621, __p1_621, __p2_621, __p3_621) __extension__ ({ \
-  int32x4_t __s0_621 = __p0_621; \
-  int16x8_t __s1_621 = __p1_621; \
-  int16x8_t __s2_621 = __p2_621; \
-  int32x4_t __ret_621; \
-  __ret_621 = vqdmlsl_s16(__s0_621, vget_high_s16(__s1_621), splat_laneq_s16(__s2_621, __p3_621)); \
-  __ret_621; \
-})
-#else
-#define vqdmlsl_high_laneq_s16(__p0_622, __p1_622, __p2_622, __p3_622) __extension__ ({ \
-  int32x4_t __s0_622 = __p0_622; \
-  int16x8_t __s1_622 = __p1_622; \
-  int16x8_t __s2_622 = __p2_622; \
-  int32x4_t __rev0_622;  __rev0_622 = __builtin_shufflevector(__s0_622, __s0_622, 3, 2, 1, 0); \
-  int16x8_t __rev1_622;  __rev1_622 = __builtin_shufflevector(__s1_622, __s1_622, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev2_622;  __rev2_622 = __builtin_shufflevector(__s2_622, __s2_622, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_622; \
-  __ret_622 = __noswap_vqdmlsl_s16(__rev0_622, __noswap_vget_high_s16(__rev1_622), __noswap_splat_laneq_s16(__rev2_622, __p3_622)); \
-  __ret_622 = __builtin_shufflevector(__ret_622, __ret_622, 3, 2, 1, 0); \
-  __ret_622; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x2_t __s2 = __p2; \
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x4_t __s2 = __p2; \
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __s1 = __p1; \
-  int32x4_t __s2 = __p2; \
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \
-  __ret; \
-})
-#else
-#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __s1 = __p1; \
-  int16x8_t __s2 = __p2; \
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s32(__p0_623, __p1_623, __p2_623, __p3_623) __extension__ ({ \
-  int64x2_t __s0_623 = __p0_623; \
-  int32x2_t __s1_623 = __p1_623; \
-  int32x4_t __s2_623 = __p2_623; \
-  int64x2_t __ret_623; \
-  __ret_623 = vqdmlsl_s32(__s0_623, __s1_623, splat_laneq_s32(__s2_623, __p3_623)); \
-  __ret_623; \
-})
-#else
-#define vqdmlsl_laneq_s32(__p0_624, __p1_624, __p2_624, __p3_624) __extension__ ({ \
-  int64x2_t __s0_624 = __p0_624; \
-  int32x2_t __s1_624 = __p1_624; \
-  int32x4_t __s2_624 = __p2_624; \
-  int64x2_t __rev0_624;  __rev0_624 = __builtin_shufflevector(__s0_624, __s0_624, 1, 0); \
-  int32x2_t __rev1_624;  __rev1_624 = __builtin_shufflevector(__s1_624, __s1_624, 1, 0); \
-  int32x4_t __rev2_624;  __rev2_624 = __builtin_shufflevector(__s2_624, __s2_624, 3, 2, 1, 0); \
-  int64x2_t __ret_624; \
-  __ret_624 = __noswap_vqdmlsl_s32(__rev0_624, __rev1_624, __noswap_splat_laneq_s32(__rev2_624, __p3_624)); \
-  __ret_624 = __builtin_shufflevector(__ret_624, __ret_624, 1, 0); \
-  __ret_624; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmlsl_laneq_s16(__p0_625, __p1_625, __p2_625, __p3_625) __extension__ ({ \
-  int32x4_t __s0_625 = __p0_625; \
-  int16x4_t __s1_625 = __p1_625; \
-  int16x8_t __s2_625 = __p2_625; \
-  int32x4_t __ret_625; \
-  __ret_625 = vqdmlsl_s16(__s0_625, __s1_625, splat_laneq_s16(__s2_625, __p3_625)); \
-  __ret_625; \
-})
-#else
-#define vqdmlsl_laneq_s16(__p0_626, __p1_626, __p2_626, __p3_626) __extension__ ({ \
-  int32x4_t __s0_626 = __p0_626; \
-  int16x4_t __s1_626 = __p1_626; \
-  int16x8_t __s2_626 = __p2_626; \
-  int32x4_t __rev0_626;  __rev0_626 = __builtin_shufflevector(__s0_626, __s0_626, 3, 2, 1, 0); \
-  int16x4_t __rev1_626;  __rev1_626 = __builtin_shufflevector(__s1_626, __s1_626, 3, 2, 1, 0); \
-  int16x8_t __rev2_626;  __rev2_626 = __builtin_shufflevector(__s2_626, __s2_626, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_626; \
-  __ret_626 = __noswap_vqdmlsl_s16(__rev0_626, __rev1_626, __noswap_splat_laneq_s16(__rev2_626, __p3_626)); \
-  __ret_626 = __builtin_shufflevector(__ret_626, __ret_626, 3, 2, 1, 0); \
-  __ret_626; \
-})
-#endif
-
-__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_lane_s32(__p0_627, __p1_627, __p2_627) __extension__ ({ \
-  int32_t __s0_627 = __p0_627; \
-  int32x2_t __s1_627 = __p1_627; \
-  int32_t __ret_627; \
-  __ret_627 = vqdmulhs_s32(__s0_627, vget_lane_s32(__s1_627, __p2_627)); \
-  __ret_627; \
-})
-#else
-#define vqdmulhs_lane_s32(__p0_628, __p1_628, __p2_628) __extension__ ({ \
-  int32_t __s0_628 = __p0_628; \
-  int32x2_t __s1_628 = __p1_628; \
-  int32x2_t __rev1_628;  __rev1_628 = __builtin_shufflevector(__s1_628, __s1_628, 1, 0); \
-  int32_t __ret_628; \
-  __ret_628 = vqdmulhs_s32(__s0_628, __noswap_vget_lane_s32(__rev1_628, __p2_628)); \
-  __ret_628; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_lane_s16(__p0_629, __p1_629, __p2_629) __extension__ ({ \
-  int16_t __s0_629 = __p0_629; \
-  int16x4_t __s1_629 = __p1_629; \
-  int16_t __ret_629; \
-  __ret_629 = vqdmulhh_s16(__s0_629, vget_lane_s16(__s1_629, __p2_629)); \
-  __ret_629; \
-})
-#else
-#define vqdmulhh_lane_s16(__p0_630, __p1_630, __p2_630) __extension__ ({ \
-  int16_t __s0_630 = __p0_630; \
-  int16x4_t __s1_630 = __p1_630; \
-  int16x4_t __rev1_630;  __rev1_630 = __builtin_shufflevector(__s1_630, __s1_630, 3, 2, 1, 0); \
-  int16_t __ret_630; \
-  __ret_630 = vqdmulhh_s16(__s0_630, __noswap_vget_lane_s16(__rev1_630, __p2_630)); \
-  __ret_630; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhs_laneq_s32(__p0_631, __p1_631, __p2_631) __extension__ ({ \
-  int32_t __s0_631 = __p0_631; \
-  int32x4_t __s1_631 = __p1_631; \
-  int32_t __ret_631; \
-  __ret_631 = vqdmulhs_s32(__s0_631, vgetq_lane_s32(__s1_631, __p2_631)); \
-  __ret_631; \
-})
-#else
-#define vqdmulhs_laneq_s32(__p0_632, __p1_632, __p2_632) __extension__ ({ \
-  int32_t __s0_632 = __p0_632; \
-  int32x4_t __s1_632 = __p1_632; \
-  int32x4_t __rev1_632;  __rev1_632 = __builtin_shufflevector(__s1_632, __s1_632, 3, 2, 1, 0); \
-  int32_t __ret_632; \
-  __ret_632 = vqdmulhs_s32(__s0_632, __noswap_vgetq_lane_s32(__rev1_632, __p2_632)); \
-  __ret_632; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhh_laneq_s16(__p0_633, __p1_633, __p2_633) __extension__ ({ \
-  int16_t __s0_633 = __p0_633; \
-  int16x8_t __s1_633 = __p1_633; \
-  int16_t __ret_633; \
-  __ret_633 = vqdmulhh_s16(__s0_633, vgetq_lane_s16(__s1_633, __p2_633)); \
-  __ret_633; \
-})
-#else
-#define vqdmulhh_laneq_s16(__p0_634, __p1_634, __p2_634) __extension__ ({ \
-  int16_t __s0_634 = __p0_634; \
-  int16x8_t __s1_634 = __p1_634; \
-  int16x8_t __rev1_634;  __rev1_634 = __builtin_shufflevector(__s1_634, __s1_634, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_634; \
-  __ret_634 = vqdmulhh_s16(__s0_634, __noswap_vgetq_lane_s16(__rev1_634, __p2_634)); \
-  __ret_634; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s32(__p0_635, __p1_635, __p2_635) __extension__ ({ \
-  int32x4_t __s0_635 = __p0_635; \
-  int32x2_t __s1_635 = __p1_635; \
-  int64x2_t __ret_635; \
-  __ret_635 = vqdmull_s32(vget_high_s32(__s0_635), splat_lane_s32(__s1_635, __p2_635)); \
-  __ret_635; \
-})
-#else
-#define vqdmull_high_lane_s32(__p0_636, __p1_636, __p2_636) __extension__ ({ \
-  int32x4_t __s0_636 = __p0_636; \
-  int32x2_t __s1_636 = __p1_636; \
-  int32x4_t __rev0_636;  __rev0_636 = __builtin_shufflevector(__s0_636, __s0_636, 3, 2, 1, 0); \
-  int32x2_t __rev1_636;  __rev1_636 = __builtin_shufflevector(__s1_636, __s1_636, 1, 0); \
-  int64x2_t __ret_636; \
-  __ret_636 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_636), __noswap_splat_lane_s32(__rev1_636, __p2_636)); \
-  __ret_636 = __builtin_shufflevector(__ret_636, __ret_636, 1, 0); \
-  __ret_636; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_lane_s16(__p0_637, __p1_637, __p2_637) __extension__ ({ \
-  int16x8_t __s0_637 = __p0_637; \
-  int16x4_t __s1_637 = __p1_637; \
-  int32x4_t __ret_637; \
-  __ret_637 = vqdmull_s16(vget_high_s16(__s0_637), splat_lane_s16(__s1_637, __p2_637)); \
-  __ret_637; \
-})
-#else
-#define vqdmull_high_lane_s16(__p0_638, __p1_638, __p2_638) __extension__ ({ \
-  int16x8_t __s0_638 = __p0_638; \
-  int16x4_t __s1_638 = __p1_638; \
-  int16x8_t __rev0_638;  __rev0_638 = __builtin_shufflevector(__s0_638, __s0_638, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1_638;  __rev1_638 = __builtin_shufflevector(__s1_638, __s1_638, 3, 2, 1, 0); \
-  int32x4_t __ret_638; \
-  __ret_638 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_638), __noswap_splat_lane_s16(__rev1_638, __p2_638)); \
-  __ret_638 = __builtin_shufflevector(__ret_638, __ret_638, 3, 2, 1, 0); \
-  __ret_638; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s32(__p0_639, __p1_639, __p2_639) __extension__ ({ \
-  int32x4_t __s0_639 = __p0_639; \
-  int32x4_t __s1_639 = __p1_639; \
-  int64x2_t __ret_639; \
-  __ret_639 = vqdmull_s32(vget_high_s32(__s0_639), splat_laneq_s32(__s1_639, __p2_639)); \
-  __ret_639; \
-})
-#else
-#define vqdmull_high_laneq_s32(__p0_640, __p1_640, __p2_640) __extension__ ({ \
-  int32x4_t __s0_640 = __p0_640; \
-  int32x4_t __s1_640 = __p1_640; \
-  int32x4_t __rev0_640;  __rev0_640 = __builtin_shufflevector(__s0_640, __s0_640, 3, 2, 1, 0); \
-  int32x4_t __rev1_640;  __rev1_640 = __builtin_shufflevector(__s1_640, __s1_640, 3, 2, 1, 0); \
-  int64x2_t __ret_640; \
-  __ret_640 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_640), __noswap_splat_laneq_s32(__rev1_640, __p2_640)); \
-  __ret_640 = __builtin_shufflevector(__ret_640, __ret_640, 1, 0); \
-  __ret_640; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_high_laneq_s16(__p0_641, __p1_641, __p2_641) __extension__ ({ \
-  int16x8_t __s0_641 = __p0_641; \
-  int16x8_t __s1_641 = __p1_641; \
-  int32x4_t __ret_641; \
-  __ret_641 = vqdmull_s16(vget_high_s16(__s0_641), splat_laneq_s16(__s1_641, __p2_641)); \
-  __ret_641; \
-})
-#else
-#define vqdmull_high_laneq_s16(__p0_642, __p1_642, __p2_642) __extension__ ({ \
-  int16x8_t __s0_642 = __p0_642; \
-  int16x8_t __s1_642 = __p1_642; \
-  int16x8_t __rev0_642;  __rev0_642 = __builtin_shufflevector(__s0_642, __s0_642, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_642;  __rev1_642 = __builtin_shufflevector(__s1_642, __s1_642, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_642; \
-  __ret_642 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_642), __noswap_splat_laneq_s16(__rev1_642, __p2_642)); \
-  __ret_642 = __builtin_shufflevector(__ret_642, __ret_642, 3, 2, 1, 0); \
-  __ret_642; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int64x2_t __ret;
-  __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int32x4_t __ret;
-  __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulls_lane_s32(__p0_643, __p1_643, __p2_643) __extension__ ({ \
-  int32_t __s0_643 = __p0_643; \
-  int32x2_t __s1_643 = __p1_643; \
-  int64_t __ret_643; \
-  __ret_643 = vqdmulls_s32(__s0_643, vget_lane_s32(__s1_643, __p2_643)); \
-  __ret_643; \
-})
-#else
-#define vqdmulls_lane_s32(__p0_644, __p1_644, __p2_644) __extension__ ({ \
-  int32_t __s0_644 = __p0_644; \
-  int32x2_t __s1_644 = __p1_644; \
-  int32x2_t __rev1_644;  __rev1_644 = __builtin_shufflevector(__s1_644, __s1_644, 1, 0); \
-  int64_t __ret_644; \
-  __ret_644 = vqdmulls_s32(__s0_644, __noswap_vget_lane_s32(__rev1_644, __p2_644)); \
-  __ret_644; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmullh_lane_s16(__p0_645, __p1_645, __p2_645) __extension__ ({ \
-  int16_t __s0_645 = __p0_645; \
-  int16x4_t __s1_645 = __p1_645; \
-  int32_t __ret_645; \
-  __ret_645 = vqdmullh_s16(__s0_645, vget_lane_s16(__s1_645, __p2_645)); \
-  __ret_645; \
-})
-#else
-#define vqdmullh_lane_s16(__p0_646, __p1_646, __p2_646) __extension__ ({ \
-  int16_t __s0_646 = __p0_646; \
-  int16x4_t __s1_646 = __p1_646; \
-  int16x4_t __rev1_646;  __rev1_646 = __builtin_shufflevector(__s1_646, __s1_646, 3, 2, 1, 0); \
-  int32_t __ret_646; \
-  __ret_646 = vqdmullh_s16(__s0_646, __noswap_vget_lane_s16(__rev1_646, __p2_646)); \
-  __ret_646; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmulls_laneq_s32(__p0_647, __p1_647, __p2_647) __extension__ ({ \
-  int32_t __s0_647 = __p0_647; \
-  int32x4_t __s1_647 = __p1_647; \
-  int64_t __ret_647; \
-  __ret_647 = vqdmulls_s32(__s0_647, vgetq_lane_s32(__s1_647, __p2_647)); \
-  __ret_647; \
-})
-#else
-#define vqdmulls_laneq_s32(__p0_648, __p1_648, __p2_648) __extension__ ({ \
-  int32_t __s0_648 = __p0_648; \
-  int32x4_t __s1_648 = __p1_648; \
-  int32x4_t __rev1_648;  __rev1_648 = __builtin_shufflevector(__s1_648, __s1_648, 3, 2, 1, 0); \
-  int64_t __ret_648; \
-  __ret_648 = vqdmulls_s32(__s0_648, __noswap_vgetq_lane_s32(__rev1_648, __p2_648)); \
-  __ret_648; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmullh_laneq_s16(__p0_649, __p1_649, __p2_649) __extension__ ({ \
-  int16_t __s0_649 = __p0_649; \
-  int16x8_t __s1_649 = __p1_649; \
-  int32_t __ret_649; \
-  __ret_649 = vqdmullh_s16(__s0_649, vgetq_lane_s16(__s1_649, __p2_649)); \
-  __ret_649; \
-})
-#else
-#define vqdmullh_laneq_s16(__p0_650, __p1_650, __p2_650) __extension__ ({ \
-  int16_t __s0_650 = __p0_650; \
-  int16x8_t __s1_650 = __p1_650; \
-  int16x8_t __rev1_650;  __rev1_650 = __builtin_shufflevector(__s1_650, __s1_650, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32_t __ret_650; \
-  __ret_650 = vqdmullh_s16(__s0_650, __noswap_vgetq_lane_s16(__rev1_650, __p2_650)); \
-  __ret_650; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s32(__p0_651, __p1_651, __p2_651) __extension__ ({ \
-  int32x2_t __s0_651 = __p0_651; \
-  int32x4_t __s1_651 = __p1_651; \
-  int64x2_t __ret_651; \
-  __ret_651 = vqdmull_s32(__s0_651, splat_laneq_s32(__s1_651, __p2_651)); \
-  __ret_651; \
-})
-#else
-#define vqdmull_laneq_s32(__p0_652, __p1_652, __p2_652) __extension__ ({ \
-  int32x2_t __s0_652 = __p0_652; \
-  int32x4_t __s1_652 = __p1_652; \
-  int32x2_t __rev0_652;  __rev0_652 = __builtin_shufflevector(__s0_652, __s0_652, 1, 0); \
-  int32x4_t __rev1_652;  __rev1_652 = __builtin_shufflevector(__s1_652, __s1_652, 3, 2, 1, 0); \
-  int64x2_t __ret_652; \
-  __ret_652 = __noswap_vqdmull_s32(__rev0_652, __noswap_splat_laneq_s32(__rev1_652, __p2_652)); \
-  __ret_652 = __builtin_shufflevector(__ret_652, __ret_652, 1, 0); \
-  __ret_652; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqdmull_laneq_s16(__p0_653, __p1_653, __p2_653) __extension__ ({ \
-  int16x4_t __s0_653 = __p0_653; \
-  int16x8_t __s1_653 = __p1_653; \
-  int32x4_t __ret_653; \
-  __ret_653 = vqdmull_s16(__s0_653, splat_laneq_s16(__s1_653, __p2_653)); \
-  __ret_653; \
-})
-#else
-#define vqdmull_laneq_s16(__p0_654, __p1_654, __p2_654) __extension__ ({ \
-  int16x4_t __s0_654 = __p0_654; \
-  int16x8_t __s1_654 = __p1_654; \
-  int16x4_t __rev0_654;  __rev0_654 = __builtin_shufflevector(__s0_654, __s0_654, 3, 2, 1, 0); \
-  int16x8_t __rev1_654;  __rev1_654 = __builtin_shufflevector(__s1_654, __s1_654, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_654; \
-  __ret_654 = __noswap_vqdmull_s16(__rev0_654, __noswap_splat_laneq_s16(__rev1_654, __p2_654)); \
-  __ret_654 = __builtin_shufflevector(__ret_654, __ret_654, 3, 2, 1, 0); \
-  __ret_654; \
-})
-#endif
-
-__ai int16_t vqmovns_s32(int32_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0);
-  return __ret;
-}
-__ai int32_t vqmovnd_s64(int64_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0);
-  return __ret;
-}
-__ai int8_t vqmovnh_s16(int16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0);
-  return __ret;
-}
-__ai uint16_t vqmovns_u32(uint32_t __p0) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0);
-  return __ret;
-}
-__ai uint32_t vqmovnd_u64(uint64_t __p0) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0);
-  return __ret;
-}
-__ai uint8_t vqmovnh_u16(uint16_t __p0) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vqmovn_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vqmovn_u64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vqmovn_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vqmovn_s32(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vqmovn_s64(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vqmovn_s16(__p1));
-  return __ret;
-}
-#else
-__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int16_t vqmovuns_s32(int32_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0);
-  return __ret;
-}
-__ai int32_t vqmovund_s64(int64_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0);
-  return __ret;
-}
-__ai int8_t vqmovunh_s16(int16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
-  return __ret;
-}
-#else
-__ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vqnegq_s64(int64x2_t __p0) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vqneg_s64(int64x1_t __p0) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3);
-  return __ret;
-}
-__ai int8_t vqnegb_s8(int8_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0);
-  return __ret;
-}
-__ai int32_t vqnegs_s32(int32_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0);
-  return __ret;
-}
-__ai int64_t vqnegd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0);
-  return __ret;
-}
-__ai int16_t vqnegh_s16(int16_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0);
-  return __ret;
-}
-__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x2_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x4_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_lane_s32(__p0_655, __p1_655, __p2_655) __extension__ ({ \
-  int32_t __s0_655 = __p0_655; \
-  int32x2_t __s1_655 = __p1_655; \
-  int32_t __ret_655; \
-  __ret_655 = vqrdmulhs_s32(__s0_655, vget_lane_s32(__s1_655, __p2_655)); \
-  __ret_655; \
-})
-#else
-#define vqrdmulhs_lane_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \
-  int32_t __s0_656 = __p0_656; \
-  int32x2_t __s1_656 = __p1_656; \
-  int32x2_t __rev1_656;  __rev1_656 = __builtin_shufflevector(__s1_656, __s1_656, 1, 0); \
-  int32_t __ret_656; \
-  __ret_656 = vqrdmulhs_s32(__s0_656, __noswap_vget_lane_s32(__rev1_656, __p2_656)); \
-  __ret_656; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_lane_s16(__p0_657, __p1_657, __p2_657) __extension__ ({ \
-  int16_t __s0_657 = __p0_657; \
-  int16x4_t __s1_657 = __p1_657; \
-  int16_t __ret_657; \
-  __ret_657 = vqrdmulhh_s16(__s0_657, vget_lane_s16(__s1_657, __p2_657)); \
-  __ret_657; \
-})
-#else
-#define vqrdmulhh_lane_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \
-  int16_t __s0_658 = __p0_658; \
-  int16x4_t __s1_658 = __p1_658; \
-  int16x4_t __rev1_658;  __rev1_658 = __builtin_shufflevector(__s1_658, __s1_658, 3, 2, 1, 0); \
-  int16_t __ret_658; \
-  __ret_658 = vqrdmulhh_s16(__s0_658, __noswap_vget_lane_s16(__rev1_658, __p2_658)); \
-  __ret_658; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhs_laneq_s32(__p0_659, __p1_659, __p2_659) __extension__ ({ \
-  int32_t __s0_659 = __p0_659; \
-  int32x4_t __s1_659 = __p1_659; \
-  int32_t __ret_659; \
-  __ret_659 = vqrdmulhs_s32(__s0_659, vgetq_lane_s32(__s1_659, __p2_659)); \
-  __ret_659; \
-})
-#else
-#define vqrdmulhs_laneq_s32(__p0_660, __p1_660, __p2_660) __extension__ ({ \
-  int32_t __s0_660 = __p0_660; \
-  int32x4_t __s1_660 = __p1_660; \
-  int32x4_t __rev1_660;  __rev1_660 = __builtin_shufflevector(__s1_660, __s1_660, 3, 2, 1, 0); \
-  int32_t __ret_660; \
-  __ret_660 = vqrdmulhs_s32(__s0_660, __noswap_vgetq_lane_s32(__rev1_660, __p2_660)); \
-  __ret_660; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhh_laneq_s16(__p0_661, __p1_661, __p2_661) __extension__ ({ \
-  int16_t __s0_661 = __p0_661; \
-  int16x8_t __s1_661 = __p1_661; \
-  int16_t __ret_661; \
-  __ret_661 = vqrdmulhh_s16(__s0_661, vgetq_lane_s16(__s1_661, __p2_661)); \
-  __ret_661; \
-})
-#else
-#define vqrdmulhh_laneq_s16(__p0_662, __p1_662, __p2_662) __extension__ ({ \
-  int16_t __s0_662 = __p0_662; \
-  int16x8_t __s1_662 = __p1_662; \
-  int16x8_t __rev1_662;  __rev1_662 = __builtin_shufflevector(__s1_662, __s1_662, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_662; \
-  __ret_662 = vqrdmulhh_s16(__s0_662, __noswap_vgetq_lane_s16(__rev1_662, __p2_662)); \
-  __ret_662; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x4_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x4_t __ret; \
-  __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \
-  __ret; \
-})
-#else
-#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x8_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret; \
-  __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \
-  __ret; \
-})
-#else
-#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \
-  int32x2_t __s0 = __p0; \
-  int32x4_t __s1 = __p1; \
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \
-  int32x2_t __ret; \
-  __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \
-  __ret; \
-})
-#else
-#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \
-  int16x4_t __s0 = __p0; \
-  int16x8_t __s1 = __p1; \
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x4_t __ret; \
-  __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u32(__p0_663, __p1_663, __p2_663) __extension__ ({ \
-  uint16x4_t __s0_663 = __p0_663; \
-  uint32x4_t __s1_663 = __p1_663; \
-  uint16x8_t __ret_663; \
-  __ret_663 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_663), (uint16x4_t)(vqrshrn_n_u32(__s1_663, __p2_663)))); \
-  __ret_663; \
-})
-#else
-#define vqrshrn_high_n_u32(__p0_664, __p1_664, __p2_664) __extension__ ({ \
-  uint16x4_t __s0_664 = __p0_664; \
-  uint32x4_t __s1_664 = __p1_664; \
-  uint16x4_t __rev0_664;  __rev0_664 = __builtin_shufflevector(__s0_664, __s0_664, 3, 2, 1, 0); \
-  uint32x4_t __rev1_664;  __rev1_664 = __builtin_shufflevector(__s1_664, __s1_664, 3, 2, 1, 0); \
-  uint16x8_t __ret_664; \
-  __ret_664 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_664), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_664, __p2_664)))); \
-  __ret_664 = __builtin_shufflevector(__ret_664, __ret_664, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_664; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u64(__p0_665, __p1_665, __p2_665) __extension__ ({ \
-  uint32x2_t __s0_665 = __p0_665; \
-  uint64x2_t __s1_665 = __p1_665; \
-  uint32x4_t __ret_665; \
-  __ret_665 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_665), (uint32x2_t)(vqrshrn_n_u64(__s1_665, __p2_665)))); \
-  __ret_665; \
-})
-#else
-#define vqrshrn_high_n_u64(__p0_666, __p1_666, __p2_666) __extension__ ({ \
-  uint32x2_t __s0_666 = __p0_666; \
-  uint64x2_t __s1_666 = __p1_666; \
-  uint32x2_t __rev0_666;  __rev0_666 = __builtin_shufflevector(__s0_666, __s0_666, 1, 0); \
-  uint64x2_t __rev1_666;  __rev1_666 = __builtin_shufflevector(__s1_666, __s1_666, 1, 0); \
-  uint32x4_t __ret_666; \
-  __ret_666 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_666), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_666, __p2_666)))); \
-  __ret_666 = __builtin_shufflevector(__ret_666, __ret_666, 3, 2, 1, 0); \
-  __ret_666; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_u16(__p0_667, __p1_667, __p2_667) __extension__ ({ \
-  uint8x8_t __s0_667 = __p0_667; \
-  uint16x8_t __s1_667 = __p1_667; \
-  uint8x16_t __ret_667; \
-  __ret_667 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_667), (uint8x8_t)(vqrshrn_n_u16(__s1_667, __p2_667)))); \
-  __ret_667; \
-})
-#else
-#define vqrshrn_high_n_u16(__p0_668, __p1_668, __p2_668) __extension__ ({ \
-  uint8x8_t __s0_668 = __p0_668; \
-  uint16x8_t __s1_668 = __p1_668; \
-  uint8x8_t __rev0_668;  __rev0_668 = __builtin_shufflevector(__s0_668, __s0_668, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_668;  __rev1_668 = __builtin_shufflevector(__s1_668, __s1_668, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_668; \
-  __ret_668 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_668), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_668, __p2_668)))); \
-  __ret_668 = __builtin_shufflevector(__ret_668, __ret_668, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_668; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s32(__p0_669, __p1_669, __p2_669) __extension__ ({ \
-  int16x4_t __s0_669 = __p0_669; \
-  int32x4_t __s1_669 = __p1_669; \
-  int16x8_t __ret_669; \
-  __ret_669 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_669), (int16x4_t)(vqrshrn_n_s32(__s1_669, __p2_669)))); \
-  __ret_669; \
-})
-#else
-#define vqrshrn_high_n_s32(__p0_670, __p1_670, __p2_670) __extension__ ({ \
-  int16x4_t __s0_670 = __p0_670; \
-  int32x4_t __s1_670 = __p1_670; \
-  int16x4_t __rev0_670;  __rev0_670 = __builtin_shufflevector(__s0_670, __s0_670, 3, 2, 1, 0); \
-  int32x4_t __rev1_670;  __rev1_670 = __builtin_shufflevector(__s1_670, __s1_670, 3, 2, 1, 0); \
-  int16x8_t __ret_670; \
-  __ret_670 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_670), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_670, __p2_670)))); \
-  __ret_670 = __builtin_shufflevector(__ret_670, __ret_670, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_670; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s64(__p0_671, __p1_671, __p2_671) __extension__ ({ \
-  int32x2_t __s0_671 = __p0_671; \
-  int64x2_t __s1_671 = __p1_671; \
-  int32x4_t __ret_671; \
-  __ret_671 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_671), (int32x2_t)(vqrshrn_n_s64(__s1_671, __p2_671)))); \
-  __ret_671; \
-})
-#else
-#define vqrshrn_high_n_s64(__p0_672, __p1_672, __p2_672) __extension__ ({ \
-  int32x2_t __s0_672 = __p0_672; \
-  int64x2_t __s1_672 = __p1_672; \
-  int32x2_t __rev0_672;  __rev0_672 = __builtin_shufflevector(__s0_672, __s0_672, 1, 0); \
-  int64x2_t __rev1_672;  __rev1_672 = __builtin_shufflevector(__s1_672, __s1_672, 1, 0); \
-  int32x4_t __ret_672; \
-  __ret_672 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_672), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_672, __p2_672)))); \
-  __ret_672 = __builtin_shufflevector(__ret_672, __ret_672, 3, 2, 1, 0); \
-  __ret_672; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrn_high_n_s16(__p0_673, __p1_673, __p2_673) __extension__ ({ \
-  int8x8_t __s0_673 = __p0_673; \
-  int16x8_t __s1_673 = __p1_673; \
-  int8x16_t __ret_673; \
-  __ret_673 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_673), (int8x8_t)(vqrshrn_n_s16(__s1_673, __p2_673)))); \
-  __ret_673; \
-})
-#else
-#define vqrshrn_high_n_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \
-  int8x8_t __s0_674 = __p0_674; \
-  int16x8_t __s1_674 = __p1_674; \
-  int8x8_t __rev0_674;  __rev0_674 = __builtin_shufflevector(__s0_674, __s0_674, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_674;  __rev1_674 = __builtin_shufflevector(__s1_674, __s1_674, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_674; \
-  __ret_674 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_674), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_674, __p2_674)))); \
-  __ret_674 = __builtin_shufflevector(__ret_674, __ret_674, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_674; \
-})
-#endif
-
-#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s32(__p0_675, __p1_675, __p2_675) __extension__ ({ \
-  int16x4_t __s0_675 = __p0_675; \
-  int32x4_t __s1_675 = __p1_675; \
-  int16x8_t __ret_675; \
-  __ret_675 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_675), (int16x4_t)(vqrshrun_n_s32(__s1_675, __p2_675)))); \
-  __ret_675; \
-})
-#else
-#define vqrshrun_high_n_s32(__p0_676, __p1_676, __p2_676) __extension__ ({ \
-  int16x4_t __s0_676 = __p0_676; \
-  int32x4_t __s1_676 = __p1_676; \
-  int16x4_t __rev0_676;  __rev0_676 = __builtin_shufflevector(__s0_676, __s0_676, 3, 2, 1, 0); \
-  int32x4_t __rev1_676;  __rev1_676 = __builtin_shufflevector(__s1_676, __s1_676, 3, 2, 1, 0); \
-  int16x8_t __ret_676; \
-  __ret_676 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_676), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_676, __p2_676)))); \
-  __ret_676 = __builtin_shufflevector(__ret_676, __ret_676, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_676; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s64(__p0_677, __p1_677, __p2_677) __extension__ ({ \
-  int32x2_t __s0_677 = __p0_677; \
-  int64x2_t __s1_677 = __p1_677; \
-  int32x4_t __ret_677; \
-  __ret_677 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_677), (int32x2_t)(vqrshrun_n_s64(__s1_677, __p2_677)))); \
-  __ret_677; \
-})
-#else
-#define vqrshrun_high_n_s64(__p0_678, __p1_678, __p2_678) __extension__ ({ \
-  int32x2_t __s0_678 = __p0_678; \
-  int64x2_t __s1_678 = __p1_678; \
-  int32x2_t __rev0_678;  __rev0_678 = __builtin_shufflevector(__s0_678, __s0_678, 1, 0); \
-  int64x2_t __rev1_678;  __rev1_678 = __builtin_shufflevector(__s1_678, __s1_678, 1, 0); \
-  int32x4_t __ret_678; \
-  __ret_678 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_678), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_678, __p2_678)))); \
-  __ret_678 = __builtin_shufflevector(__ret_678, __ret_678, 3, 2, 1, 0); \
-  __ret_678; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrshrun_high_n_s16(__p0_679, __p1_679, __p2_679) __extension__ ({ \
-  int8x8_t __s0_679 = __p0_679; \
-  int16x8_t __s1_679 = __p1_679; \
-  int8x16_t __ret_679; \
-  __ret_679 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_679), (int8x8_t)(vqrshrun_n_s16(__s1_679, __p2_679)))); \
-  __ret_679; \
-})
-#else
-#define vqrshrun_high_n_s16(__p0_680, __p1_680, __p2_680) __extension__ ({ \
-  int8x8_t __s0_680 = __p0_680; \
-  int16x8_t __s1_680 = __p1_680; \
-  int8x8_t __rev0_680;  __rev0_680 = __builtin_shufflevector(__s0_680, __s0_680, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_680;  __rev1_680 = __builtin_shufflevector(__s1_680, __s1_680, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_680; \
-  __ret_680 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_680), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_680, __p2_680)))); \
-  __ret_680 = __builtin_shufflevector(__ret_680, __ret_680, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_680; \
-})
-#endif
-
-#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
-  __ret; \
-})
-__ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1);
-  return __ret;
-}
-#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \
-  uint8_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \
-  __ret; \
-})
-#define vqshls_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqshld_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \
-  __ret; \
-})
-#define vqshls_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshld_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \
-  int8_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \
-  __ret; \
-})
-#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u32(__p0_681, __p1_681, __p2_681) __extension__ ({ \
-  uint16x4_t __s0_681 = __p0_681; \
-  uint32x4_t __s1_681 = __p1_681; \
-  uint16x8_t __ret_681; \
-  __ret_681 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_681), (uint16x4_t)(vqshrn_n_u32(__s1_681, __p2_681)))); \
-  __ret_681; \
-})
-#else
-#define vqshrn_high_n_u32(__p0_682, __p1_682, __p2_682) __extension__ ({ \
-  uint16x4_t __s0_682 = __p0_682; \
-  uint32x4_t __s1_682 = __p1_682; \
-  uint16x4_t __rev0_682;  __rev0_682 = __builtin_shufflevector(__s0_682, __s0_682, 3, 2, 1, 0); \
-  uint32x4_t __rev1_682;  __rev1_682 = __builtin_shufflevector(__s1_682, __s1_682, 3, 2, 1, 0); \
-  uint16x8_t __ret_682; \
-  __ret_682 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_682), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_682, __p2_682)))); \
-  __ret_682 = __builtin_shufflevector(__ret_682, __ret_682, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_682; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u64(__p0_683, __p1_683, __p2_683) __extension__ ({ \
-  uint32x2_t __s0_683 = __p0_683; \
-  uint64x2_t __s1_683 = __p1_683; \
-  uint32x4_t __ret_683; \
-  __ret_683 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_683), (uint32x2_t)(vqshrn_n_u64(__s1_683, __p2_683)))); \
-  __ret_683; \
-})
-#else
-#define vqshrn_high_n_u64(__p0_684, __p1_684, __p2_684) __extension__ ({ \
-  uint32x2_t __s0_684 = __p0_684; \
-  uint64x2_t __s1_684 = __p1_684; \
-  uint32x2_t __rev0_684;  __rev0_684 = __builtin_shufflevector(__s0_684, __s0_684, 1, 0); \
-  uint64x2_t __rev1_684;  __rev1_684 = __builtin_shufflevector(__s1_684, __s1_684, 1, 0); \
-  uint32x4_t __ret_684; \
-  __ret_684 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_684), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_684, __p2_684)))); \
-  __ret_684 = __builtin_shufflevector(__ret_684, __ret_684, 3, 2, 1, 0); \
-  __ret_684; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_u16(__p0_685, __p1_685, __p2_685) __extension__ ({ \
-  uint8x8_t __s0_685 = __p0_685; \
-  uint16x8_t __s1_685 = __p1_685; \
-  uint8x16_t __ret_685; \
-  __ret_685 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_685), (uint8x8_t)(vqshrn_n_u16(__s1_685, __p2_685)))); \
-  __ret_685; \
-})
-#else
-#define vqshrn_high_n_u16(__p0_686, __p1_686, __p2_686) __extension__ ({ \
-  uint8x8_t __s0_686 = __p0_686; \
-  uint16x8_t __s1_686 = __p1_686; \
-  uint8x8_t __rev0_686;  __rev0_686 = __builtin_shufflevector(__s0_686, __s0_686, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_686;  __rev1_686 = __builtin_shufflevector(__s1_686, __s1_686, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_686; \
-  __ret_686 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_686), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_686, __p2_686)))); \
-  __ret_686 = __builtin_shufflevector(__ret_686, __ret_686, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_686; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s32(__p0_687, __p1_687, __p2_687) __extension__ ({ \
-  int16x4_t __s0_687 = __p0_687; \
-  int32x4_t __s1_687 = __p1_687; \
-  int16x8_t __ret_687; \
-  __ret_687 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_687), (int16x4_t)(vqshrn_n_s32(__s1_687, __p2_687)))); \
-  __ret_687; \
-})
-#else
-#define vqshrn_high_n_s32(__p0_688, __p1_688, __p2_688) __extension__ ({ \
-  int16x4_t __s0_688 = __p0_688; \
-  int32x4_t __s1_688 = __p1_688; \
-  int16x4_t __rev0_688;  __rev0_688 = __builtin_shufflevector(__s0_688, __s0_688, 3, 2, 1, 0); \
-  int32x4_t __rev1_688;  __rev1_688 = __builtin_shufflevector(__s1_688, __s1_688, 3, 2, 1, 0); \
-  int16x8_t __ret_688; \
-  __ret_688 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_688), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_688, __p2_688)))); \
-  __ret_688 = __builtin_shufflevector(__ret_688, __ret_688, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_688; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s64(__p0_689, __p1_689, __p2_689) __extension__ ({ \
-  int32x2_t __s0_689 = __p0_689; \
-  int64x2_t __s1_689 = __p1_689; \
-  int32x4_t __ret_689; \
-  __ret_689 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_689), (int32x2_t)(vqshrn_n_s64(__s1_689, __p2_689)))); \
-  __ret_689; \
-})
-#else
-#define vqshrn_high_n_s64(__p0_690, __p1_690, __p2_690) __extension__ ({ \
-  int32x2_t __s0_690 = __p0_690; \
-  int64x2_t __s1_690 = __p1_690; \
-  int32x2_t __rev0_690;  __rev0_690 = __builtin_shufflevector(__s0_690, __s0_690, 1, 0); \
-  int64x2_t __rev1_690;  __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 1, 0); \
-  int32x4_t __ret_690; \
-  __ret_690 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_690), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_690, __p2_690)))); \
-  __ret_690 = __builtin_shufflevector(__ret_690, __ret_690, 3, 2, 1, 0); \
-  __ret_690; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrn_high_n_s16(__p0_691, __p1_691, __p2_691) __extension__ ({ \
-  int8x8_t __s0_691 = __p0_691; \
-  int16x8_t __s1_691 = __p1_691; \
-  int8x16_t __ret_691; \
-  __ret_691 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_691), (int8x8_t)(vqshrn_n_s16(__s1_691, __p2_691)))); \
-  __ret_691; \
-})
-#else
-#define vqshrn_high_n_s16(__p0_692, __p1_692, __p2_692) __extension__ ({ \
-  int8x8_t __s0_692 = __p0_692; \
-  int16x8_t __s1_692 = __p1_692; \
-  int8x8_t __rev0_692;  __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_692;  __rev1_692 = __builtin_shufflevector(__s1_692, __s1_692, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_692; \
-  __ret_692 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_692), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_692, __p2_692)))); \
-  __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_692; \
-})
-#endif
-
-#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \
-  uint32_t __s0 = __p0; \
-  uint16_t __ret; \
-  __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint32_t __ret; \
-  __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \
-  uint16_t __s0 = __p0; \
-  uint8_t __ret; \
-  __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \
-  __ret; \
-})
-#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s32(__p0_693, __p1_693, __p2_693) __extension__ ({ \
-  int16x4_t __s0_693 = __p0_693; \
-  int32x4_t __s1_693 = __p1_693; \
-  int16x8_t __ret_693; \
-  __ret_693 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_693), (int16x4_t)(vqshrun_n_s32(__s1_693, __p2_693)))); \
-  __ret_693; \
-})
-#else
-#define vqshrun_high_n_s32(__p0_694, __p1_694, __p2_694) __extension__ ({ \
-  int16x4_t __s0_694 = __p0_694; \
-  int32x4_t __s1_694 = __p1_694; \
-  int16x4_t __rev0_694;  __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 3, 2, 1, 0); \
-  int32x4_t __rev1_694;  __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 3, 2, 1, 0); \
-  int16x8_t __ret_694; \
-  __ret_694 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_694), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_694, __p2_694)))); \
-  __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_694; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s64(__p0_695, __p1_695, __p2_695) __extension__ ({ \
-  int32x2_t __s0_695 = __p0_695; \
-  int64x2_t __s1_695 = __p1_695; \
-  int32x4_t __ret_695; \
-  __ret_695 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_695), (int32x2_t)(vqshrun_n_s64(__s1_695, __p2_695)))); \
-  __ret_695; \
-})
-#else
-#define vqshrun_high_n_s64(__p0_696, __p1_696, __p2_696) __extension__ ({ \
-  int32x2_t __s0_696 = __p0_696; \
-  int64x2_t __s1_696 = __p1_696; \
-  int32x2_t __rev0_696;  __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 1, 0); \
-  int64x2_t __rev1_696;  __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 1, 0); \
-  int32x4_t __ret_696; \
-  __ret_696 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_696), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_696, __p2_696)))); \
-  __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 3, 2, 1, 0); \
-  __ret_696; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqshrun_high_n_s16(__p0_697, __p1_697, __p2_697) __extension__ ({ \
-  int8x8_t __s0_697 = __p0_697; \
-  int16x8_t __s1_697 = __p1_697; \
-  int8x16_t __ret_697; \
-  __ret_697 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_697), (int8x8_t)(vqshrun_n_s16(__s1_697, __p2_697)))); \
-  __ret_697; \
-})
-#else
-#define vqshrun_high_n_s16(__p0_698, __p1_698, __p2_698) __extension__ ({ \
-  int8x8_t __s0_698 = __p0_698; \
-  int16x8_t __s1_698 = __p1_698; \
-  int8x8_t __rev0_698;  __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_698;  __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_698; \
-  __ret_698 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_698), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_698, __p2_698)))); \
-  __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_698; \
-})
-#endif
-
-#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \
-  int32_t __s0 = __p0; \
-  int16_t __ret; \
-  __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \
-  __ret; \
-})
-#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int32_t __ret; \
-  __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \
-  __ret; \
-})
-#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \
-  int16_t __s0 = __p0; \
-  int8_t __ret; \
-  __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \
-  __ret; \
-})
-__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1);
-  return __ret;
-}
-__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) {
-  poly8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) {
-  poly8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) {
-  uint8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) {
-  int8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) {
-  uint8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) {
-  int8x16x2_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) {
-  poly8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) {
-  poly8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) {
-  uint8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) {
-  int8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) {
-  uint8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) {
-  int8x16x3_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) {
-  poly8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) {
-  poly8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) {
-  uint8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) {
-  int8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) {
-  uint8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) {
-  int8x16x4_t __rev0;
-  __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x2_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x3_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16x4_t __rev1;
-  __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4);
-  return __ret;
-}
-#else
-__ai poly8x8_t vrbit_p8(poly8x8_t __p0) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36);
-  return __ret;
-}
-#else
-__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vrbitq_s8(int8x16_t __p0) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vrbit_u8(uint8x8_t __p0) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vrbit_s8(int8x8_t __p0) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vrbit_s8(int8x8_t __p0) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrecpeq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrecpe_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai float64_t vrecped_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecped_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrecpes_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1);
-  return __ret;
-}
-__ai float64_t vrecpxd_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrecpxs_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
-  return __ret;
-}
-__ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1);
-  return __ret;
-}
-#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u32(__p0_699, __p1_699, __p2_699) __extension__ ({ \
-  uint16x4_t __s0_699 = __p0_699; \
-  uint32x4_t __s1_699 = __p1_699; \
-  uint16x8_t __ret_699; \
-  __ret_699 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_699), (uint16x4_t)(vrshrn_n_u32(__s1_699, __p2_699)))); \
-  __ret_699; \
-})
-#else
-#define vrshrn_high_n_u32(__p0_700, __p1_700, __p2_700) __extension__ ({ \
-  uint16x4_t __s0_700 = __p0_700; \
-  uint32x4_t __s1_700 = __p1_700; \
-  uint16x4_t __rev0_700;  __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 3, 2, 1, 0); \
-  uint32x4_t __rev1_700;  __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 3, 2, 1, 0); \
-  uint16x8_t __ret_700; \
-  __ret_700 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_700), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_700, __p2_700)))); \
-  __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_700; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u64(__p0_701, __p1_701, __p2_701) __extension__ ({ \
-  uint32x2_t __s0_701 = __p0_701; \
-  uint64x2_t __s1_701 = __p1_701; \
-  uint32x4_t __ret_701; \
-  __ret_701 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_701), (uint32x2_t)(vrshrn_n_u64(__s1_701, __p2_701)))); \
-  __ret_701; \
-})
-#else
-#define vrshrn_high_n_u64(__p0_702, __p1_702, __p2_702) __extension__ ({ \
-  uint32x2_t __s0_702 = __p0_702; \
-  uint64x2_t __s1_702 = __p1_702; \
-  uint32x2_t __rev0_702;  __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 1, 0); \
-  uint64x2_t __rev1_702;  __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 1, 0); \
-  uint32x4_t __ret_702; \
-  __ret_702 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_702), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_702, __p2_702)))); \
-  __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 3, 2, 1, 0); \
-  __ret_702; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_u16(__p0_703, __p1_703, __p2_703) __extension__ ({ \
-  uint8x8_t __s0_703 = __p0_703; \
-  uint16x8_t __s1_703 = __p1_703; \
-  uint8x16_t __ret_703; \
-  __ret_703 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_703), (uint8x8_t)(vrshrn_n_u16(__s1_703, __p2_703)))); \
-  __ret_703; \
-})
-#else
-#define vrshrn_high_n_u16(__p0_704, __p1_704, __p2_704) __extension__ ({ \
-  uint8x8_t __s0_704 = __p0_704; \
-  uint16x8_t __s1_704 = __p1_704; \
-  uint8x8_t __rev0_704;  __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_704;  __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_704; \
-  __ret_704 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_704), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_704, __p2_704)))); \
-  __ret_704 = __builtin_shufflevector(__ret_704, __ret_704, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_704; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s32(__p0_705, __p1_705, __p2_705) __extension__ ({ \
-  int16x4_t __s0_705 = __p0_705; \
-  int32x4_t __s1_705 = __p1_705; \
-  int16x8_t __ret_705; \
-  __ret_705 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_705), (int16x4_t)(vrshrn_n_s32(__s1_705, __p2_705)))); \
-  __ret_705; \
-})
-#else
-#define vrshrn_high_n_s32(__p0_706, __p1_706, __p2_706) __extension__ ({ \
-  int16x4_t __s0_706 = __p0_706; \
-  int32x4_t __s1_706 = __p1_706; \
-  int16x4_t __rev0_706;  __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 3, 2, 1, 0); \
-  int32x4_t __rev1_706;  __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 3, 2, 1, 0); \
-  int16x8_t __ret_706; \
-  __ret_706 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_706), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_706, __p2_706)))); \
-  __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_706; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s64(__p0_707, __p1_707, __p2_707) __extension__ ({ \
-  int32x2_t __s0_707 = __p0_707; \
-  int64x2_t __s1_707 = __p1_707; \
-  int32x4_t __ret_707; \
-  __ret_707 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_707), (int32x2_t)(vrshrn_n_s64(__s1_707, __p2_707)))); \
-  __ret_707; \
-})
-#else
-#define vrshrn_high_n_s64(__p0_708, __p1_708, __p2_708) __extension__ ({ \
-  int32x2_t __s0_708 = __p0_708; \
-  int64x2_t __s1_708 = __p1_708; \
-  int32x2_t __rev0_708;  __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 1, 0); \
-  int64x2_t __rev1_708;  __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 1, 0); \
-  int32x4_t __ret_708; \
-  __ret_708 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_708), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_708, __p2_708)))); \
-  __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 3, 2, 1, 0); \
-  __ret_708; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vrshrn_high_n_s16(__p0_709, __p1_709, __p2_709) __extension__ ({ \
-  int8x8_t __s0_709 = __p0_709; \
-  int16x8_t __s1_709 = __p1_709; \
-  int8x16_t __ret_709; \
-  __ret_709 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_709), (int8x8_t)(vrshrn_n_s16(__s1_709, __p2_709)))); \
-  __ret_709; \
-})
-#else
-#define vrshrn_high_n_s16(__p0_710, __p1_710, __p2_710) __extension__ ({ \
-  int8x8_t __s0_710 = __p0_710; \
-  int16x8_t __s1_710 = __p1_710; \
-  int8x8_t __rev0_710;  __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_710;  __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_710; \
-  __ret_710 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_710), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_710, __p2_710)))); \
-  __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_710; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrsqrte_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-__ai float64_t vrsqrted_f64(float64_t __p0) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0);
-  return __ret;
-}
-__ai float32_t vrsqrtes_f32(float32_t __p0) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10);
-  return __ret;
-}
-__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) {
-  float64_t __ret;
-  __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1);
-  return __ret;
-}
-__ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) {
-  float32_t __ret;
-  __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1);
-  return __ret;
-}
-#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (poly64x1_t)__s1, __p2); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
-  __ret; \
-})
-#else
-#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__rev1, __p2); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __ret; \
-  __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \
-  __ret; \
-})
-#endif
-
-#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64_t __s0 = __p0; \
-  float64x1_t __s1 = __p1; \
-  float64x1_t __ret; \
-  __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \
-  __ret; \
-})
-__ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vshld_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1);
-  return __ret;
-}
-#define vshld_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vshld_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u8(__p0_711, __p1_711) __extension__ ({ \
-  uint8x16_t __s0_711 = __p0_711; \
-  uint16x8_t __ret_711; \
-  __ret_711 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_711), __p1_711)); \
-  __ret_711; \
-})
-#else
-#define vshll_high_n_u8(__p0_712, __p1_712) __extension__ ({ \
-  uint8x16_t __s0_712 = __p0_712; \
-  uint8x16_t __rev0_712;  __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __ret_712; \
-  __ret_712 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_712), __p1_712)); \
-  __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_712; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u32(__p0_713, __p1_713) __extension__ ({ \
-  uint32x4_t __s0_713 = __p0_713; \
-  uint64x2_t __ret_713; \
-  __ret_713 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_713), __p1_713)); \
-  __ret_713; \
-})
-#else
-#define vshll_high_n_u32(__p0_714, __p1_714) __extension__ ({ \
-  uint32x4_t __s0_714 = __p0_714; \
-  uint32x4_t __rev0_714;  __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 3, 2, 1, 0); \
-  uint64x2_t __ret_714; \
-  __ret_714 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_714), __p1_714)); \
-  __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 1, 0); \
-  __ret_714; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_u16(__p0_715, __p1_715) __extension__ ({ \
-  uint16x8_t __s0_715 = __p0_715; \
-  uint32x4_t __ret_715; \
-  __ret_715 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_715), __p1_715)); \
-  __ret_715; \
-})
-#else
-#define vshll_high_n_u16(__p0_716, __p1_716) __extension__ ({ \
-  uint16x8_t __s0_716 = __p0_716; \
-  uint16x8_t __rev0_716;  __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint32x4_t __ret_716; \
-  __ret_716 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_716), __p1_716)); \
-  __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 3, 2, 1, 0); \
-  __ret_716; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s8(__p0_717, __p1_717) __extension__ ({ \
-  int8x16_t __s0_717 = __p0_717; \
-  int16x8_t __ret_717; \
-  __ret_717 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_717), __p1_717)); \
-  __ret_717; \
-})
-#else
-#define vshll_high_n_s8(__p0_718, __p1_718) __extension__ ({ \
-  int8x16_t __s0_718 = __p0_718; \
-  int8x16_t __rev0_718;  __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __ret_718; \
-  __ret_718 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_718), __p1_718)); \
-  __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_718; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s32(__p0_719, __p1_719) __extension__ ({ \
-  int32x4_t __s0_719 = __p0_719; \
-  int64x2_t __ret_719; \
-  __ret_719 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_719), __p1_719)); \
-  __ret_719; \
-})
-#else
-#define vshll_high_n_s32(__p0_720, __p1_720) __extension__ ({ \
-  int32x4_t __s0_720 = __p0_720; \
-  int32x4_t __rev0_720;  __rev0_720 = __builtin_shufflevector(__s0_720, __s0_720, 3, 2, 1, 0); \
-  int64x2_t __ret_720; \
-  __ret_720 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_720), __p1_720)); \
-  __ret_720 = __builtin_shufflevector(__ret_720, __ret_720, 1, 0); \
-  __ret_720; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshll_high_n_s16(__p0_721, __p1_721) __extension__ ({ \
-  int16x8_t __s0_721 = __p0_721; \
-  int32x4_t __ret_721; \
-  __ret_721 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_721), __p1_721)); \
-  __ret_721; \
-})
-#else
-#define vshll_high_n_s16(__p0_722, __p1_722) __extension__ ({ \
-  int16x8_t __s0_722 = __p0_722; \
-  int16x8_t __rev0_722;  __rev0_722 = __builtin_shufflevector(__s0_722, __s0_722, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_722; \
-  __ret_722 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_722), __p1_722)); \
-  __ret_722 = __builtin_shufflevector(__ret_722, __ret_722, 3, 2, 1, 0); \
-  __ret_722; \
-})
-#endif
-
-#define vshrd_n_u64(__p0, __p1) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \
-  __ret; \
-})
-#define vshrd_n_s64(__p0, __p1) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u32(__p0_723, __p1_723, __p2_723) __extension__ ({ \
-  uint16x4_t __s0_723 = __p0_723; \
-  uint32x4_t __s1_723 = __p1_723; \
-  uint16x8_t __ret_723; \
-  __ret_723 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_723), (uint16x4_t)(vshrn_n_u32(__s1_723, __p2_723)))); \
-  __ret_723; \
-})
-#else
-#define vshrn_high_n_u32(__p0_724, __p1_724, __p2_724) __extension__ ({ \
-  uint16x4_t __s0_724 = __p0_724; \
-  uint32x4_t __s1_724 = __p1_724; \
-  uint16x4_t __rev0_724;  __rev0_724 = __builtin_shufflevector(__s0_724, __s0_724, 3, 2, 1, 0); \
-  uint32x4_t __rev1_724;  __rev1_724 = __builtin_shufflevector(__s1_724, __s1_724, 3, 2, 1, 0); \
-  uint16x8_t __ret_724; \
-  __ret_724 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_724), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_724, __p2_724)))); \
-  __ret_724 = __builtin_shufflevector(__ret_724, __ret_724, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_724; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u64(__p0_725, __p1_725, __p2_725) __extension__ ({ \
-  uint32x2_t __s0_725 = __p0_725; \
-  uint64x2_t __s1_725 = __p1_725; \
-  uint32x4_t __ret_725; \
-  __ret_725 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_725), (uint32x2_t)(vshrn_n_u64(__s1_725, __p2_725)))); \
-  __ret_725; \
-})
-#else
-#define vshrn_high_n_u64(__p0_726, __p1_726, __p2_726) __extension__ ({ \
-  uint32x2_t __s0_726 = __p0_726; \
-  uint64x2_t __s1_726 = __p1_726; \
-  uint32x2_t __rev0_726;  __rev0_726 = __builtin_shufflevector(__s0_726, __s0_726, 1, 0); \
-  uint64x2_t __rev1_726;  __rev1_726 = __builtin_shufflevector(__s1_726, __s1_726, 1, 0); \
-  uint32x4_t __ret_726; \
-  __ret_726 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_726), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_726, __p2_726)))); \
-  __ret_726 = __builtin_shufflevector(__ret_726, __ret_726, 3, 2, 1, 0); \
-  __ret_726; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_u16(__p0_727, __p1_727, __p2_727) __extension__ ({ \
-  uint8x8_t __s0_727 = __p0_727; \
-  uint16x8_t __s1_727 = __p1_727; \
-  uint8x16_t __ret_727; \
-  __ret_727 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_727), (uint8x8_t)(vshrn_n_u16(__s1_727, __p2_727)))); \
-  __ret_727; \
-})
-#else
-#define vshrn_high_n_u16(__p0_728, __p1_728, __p2_728) __extension__ ({ \
-  uint8x8_t __s0_728 = __p0_728; \
-  uint16x8_t __s1_728 = __p1_728; \
-  uint8x8_t __rev0_728;  __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint16x8_t __rev1_728;  __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __ret_728; \
-  __ret_728 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_728), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_728, __p2_728)))); \
-  __ret_728 = __builtin_shufflevector(__ret_728, __ret_728, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_728; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s32(__p0_729, __p1_729, __p2_729) __extension__ ({ \
-  int16x4_t __s0_729 = __p0_729; \
-  int32x4_t __s1_729 = __p1_729; \
-  int16x8_t __ret_729; \
-  __ret_729 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_729), (int16x4_t)(vshrn_n_s32(__s1_729, __p2_729)))); \
-  __ret_729; \
-})
-#else
-#define vshrn_high_n_s32(__p0_730, __p1_730, __p2_730) __extension__ ({ \
-  int16x4_t __s0_730 = __p0_730; \
-  int32x4_t __s1_730 = __p1_730; \
-  int16x4_t __rev0_730;  __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 3, 2, 1, 0); \
-  int32x4_t __rev1_730;  __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 3, 2, 1, 0); \
-  int16x8_t __ret_730; \
-  __ret_730 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_730), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_730, __p2_730)))); \
-  __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_730; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s64(__p0_731, __p1_731, __p2_731) __extension__ ({ \
-  int32x2_t __s0_731 = __p0_731; \
-  int64x2_t __s1_731 = __p1_731; \
-  int32x4_t __ret_731; \
-  __ret_731 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_731), (int32x2_t)(vshrn_n_s64(__s1_731, __p2_731)))); \
-  __ret_731; \
-})
-#else
-#define vshrn_high_n_s64(__p0_732, __p1_732, __p2_732) __extension__ ({ \
-  int32x2_t __s0_732 = __p0_732; \
-  int64x2_t __s1_732 = __p1_732; \
-  int32x2_t __rev0_732;  __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 1, 0); \
-  int64x2_t __rev1_732;  __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 1, 0); \
-  int32x4_t __ret_732; \
-  __ret_732 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_732), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_732, __p2_732)))); \
-  __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 3, 2, 1, 0); \
-  __ret_732; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vshrn_high_n_s16(__p0_733, __p1_733, __p2_733) __extension__ ({ \
-  int8x8_t __s0_733 = __p0_733; \
-  int16x8_t __s1_733 = __p1_733; \
-  int8x16_t __ret_733; \
-  __ret_733 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_733), (int8x8_t)(vshrn_n_s16(__s1_733, __p2_733)))); \
-  __ret_733; \
-})
-#else
-#define vshrn_high_n_s16(__p0_734, __p1_734, __p2_734) __extension__ ({ \
-  int8x8_t __s0_734 = __p0_734; \
-  int16x8_t __s1_734 = __p1_734; \
-  int8x8_t __rev0_734;  __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16x8_t __rev1_734;  __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __ret_734; \
-  __ret_734 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_734), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_734, __p2_734)))); \
-  __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_734; \
-})
-#endif
-
-#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-__ai uint8_t vsqaddb_u8(uint8_t __p0, int8_t __p1) {
-  uint8_t __ret;
-  __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1);
-  return __ret;
-}
-__ai uint32_t vsqadds_u32(uint32_t __p0, int32_t __p1) {
-  uint32_t __ret;
-  __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vsqaddd_u64(uint64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint16_t vsqaddh_u16(uint16_t __p0, int16_t __p1) {
-  uint16_t __ret;
-  __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48);
-  return __ret;
-}
-#else
-__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16);
-  return __ret;
-}
-#else
-__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18);
-  return __ret;
-}
-#else
-__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vsqadd_u64(uint64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17);
-  return __ret;
-}
-#else
-__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42);
-  return __ret;
-}
-#else
-__ai float64x2_t vsqrtq_f64(float64x2_t __p0) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __ret;
-  __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41);
-  return __ret;
-}
-#else
-__ai float32x4_t vsqrtq_f32(float32x4_t __p0) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vsqrt_f64(float64x1_t __p0) {
-  float64x1_t __ret;
-  __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vsqrt_f32(float32x2_t __p0) {
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9);
-  return __ret;
-}
-#else
-__ai float32x2_t vsqrt_f32(float32x2_t __p0) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __ret;
-  __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64_t __s0 = __p0; \
-  uint64_t __s1 = __p1; \
-  uint64_t __ret; \
-  __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64_t __s0 = __p0; \
-  int64_t __s1 = __p1; \
-  int64_t __ret; \
-  __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \
-  __ret; \
-})
-#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s0 = __p0; \
-  poly64x1_t __s1 = __p1; \
-  poly64x1_t __ret; \
-  __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \
-  __ret; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \
-  __ret; \
-})
-#else
-#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s0 = __p0; \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  poly64x2_t __ret; \
-  __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \
-  __ret; \
-})
-#endif
-
-#define vst1_p64(__p0, __p1) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \
-})
-#else
-#define vst1q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \
-})
-#else
-#define vst1q_f64(__p0, __p1) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \
-})
-#endif
-
-#define vst1_f64(__p0, __p1) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \
-})
-#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \
-})
-#else
-#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2_t __s1 = __p1; \
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \
-})
-#else
-#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2_t __s1 = __p1; \
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \
-  __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \
-})
-#endif
-
-#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1_t __s1 = __p1; \
-  __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \
-})
-#define vst1_p64_x2(__p0, __p1) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
-})
-#else
-#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \
-})
-#else
-#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \
-})
-#endif
-
-#define vst1_f64_x2(__p0, __p1) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \
-})
-#define vst1_p64_x3(__p0, __p1) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
-})
-#else
-#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \
-})
-#else
-#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \
-})
-#endif
-
-#define vst1_f64_x3(__p0, __p1) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \
-})
-#define vst1_p64_x4(__p0, __p1) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
-})
-#else
-#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \
-})
-#else
-#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \
-})
-#endif
-
-#define vst1_f64_x4(__p0, __p1) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \
-})
-#define vst2_p64(__p0, __p1) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \
-})
-#else
-#define vst2q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \
-})
-#else
-#define vst2q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \
-})
-#else
-#define vst2q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \
-})
-#else
-#define vst2q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \
-})
-#endif
-
-#define vst2_f64(__p0, __p1) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \
-})
-#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \
-})
-#else
-#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x2_t __s1 = __p1; \
-  poly8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \
-})
-#else
-#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x2_t __s1 = __p1; \
-  poly64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \
-})
-#else
-#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x2_t __s1 = __p1; \
-  uint8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \
-})
-#else
-#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x2_t __s1 = __p1; \
-  uint64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \
-})
-#else
-#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x2_t __s1 = __p1; \
-  int8x16x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \
-})
-#else
-#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x2_t __s1 = __p1; \
-  float64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \
-})
-#else
-#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x2_t __s1 = __p1; \
-  int64x2x2_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \
-})
-#endif
-
-#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \
-})
-#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \
-})
-#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x2_t __s1 = __p1; \
-  __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \
-})
-#define vst3_p64(__p0, __p1) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \
-})
-#else
-#define vst3q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \
-})
-#else
-#define vst3q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \
-})
-#else
-#define vst3q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \
-})
-#else
-#define vst3q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \
-})
-#endif
-
-#define vst3_f64(__p0, __p1) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \
-})
-#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \
-})
-#else
-#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x3_t __s1 = __p1; \
-  poly8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \
-})
-#else
-#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x3_t __s1 = __p1; \
-  poly64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \
-})
-#else
-#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x3_t __s1 = __p1; \
-  uint8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \
-})
-#else
-#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x3_t __s1 = __p1; \
-  uint64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \
-})
-#else
-#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x3_t __s1 = __p1; \
-  int8x16x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \
-})
-#else
-#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x3_t __s1 = __p1; \
-  float64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \
-})
-#else
-#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x3_t __s1 = __p1; \
-  int64x2x3_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \
-})
-#endif
-
-#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \
-})
-#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \
-})
-#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x3_t __s1 = __p1; \
-  __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \
-})
-#define vst4_p64(__p0, __p1) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \
-})
-#else
-#define vst4q_p64(__p0, __p1) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \
-})
-#else
-#define vst4q_u64(__p0, __p1) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \
-})
-#else
-#define vst4q_f64(__p0, __p1) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \
-})
-#else
-#define vst4q_s64(__p0, __p1) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \
-})
-#endif
-
-#define vst4_f64(__p0, __p1) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \
-})
-#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \
-})
-#else
-#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \
-  poly8x16x4_t __s1 = __p1; \
-  poly8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \
-})
-#else
-#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \
-  poly64x2x4_t __s1 = __p1; \
-  poly64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \
-})
-#else
-#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \
-  uint8x16x4_t __s1 = __p1; \
-  uint8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \
-})
-#else
-#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x2x4_t __s1 = __p1; \
-  uint64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \
-})
-#else
-#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \
-  int8x16x4_t __s1 = __p1; \
-  int8x16x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \
-})
-#else
-#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x2x4_t __s1 = __p1; \
-  float64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \
-})
-#else
-#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x2x4_t __s1 = __p1; \
-  int64x2x4_t __rev1; \
-  __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \
-  __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \
-  __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \
-  __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \
-  __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \
-})
-#endif
-
-#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \
-  uint64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \
-})
-#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \
-  float64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \
-})
-#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \
-  int64x1x4_t __s1 = __p1; \
-  __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \
-})
-#define vstrq_p128(__p0, __p1) __extension__ ({ \
-  poly128_t __s1 = __p1; \
-  __builtin_neon_vstrq_p128(__p0, __s1); \
-})
-__ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#else
-__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __rev0 - __rev1;
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) {
-  float64x1_t __ret;
-  __ret = __p0 - __p1;
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x8_t __ret;
-  __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x4_t __ret;
-  __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x16_t __ret;
-  __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x8_t __ret;
-  __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x4_t __ret;
-  __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x16_t __ret;
-  __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2));
-  return __ret;
-}
-#else
-__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 - vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 - vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 - vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 - vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsudotq_laneq_s32(__p0_735, __p1_735, __p2_735, __p3_735) __extension__ ({ \
-  int32x4_t __s0_735 = __p0_735; \
-  int8x16_t __s1_735 = __p1_735; \
-  uint8x16_t __s2_735 = __p2_735; \
-  int32x4_t __ret_735; \
-  uint8x16_t __reint_735 = __s2_735; \
-  __ret_735 = vusdotq_s32(__s0_735, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_735, __p3_735)), __s1_735); \
-  __ret_735; \
-})
-#else
-#define vsudotq_laneq_s32(__p0_736, __p1_736, __p2_736, __p3_736) __extension__ ({ \
-  int32x4_t __s0_736 = __p0_736; \
-  int8x16_t __s1_736 = __p1_736; \
-  uint8x16_t __s2_736 = __p2_736; \
-  int32x4_t __rev0_736;  __rev0_736 = __builtin_shufflevector(__s0_736, __s0_736, 3, 2, 1, 0); \
-  int8x16_t __rev1_736;  __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_736;  __rev2_736 = __builtin_shufflevector(__s2_736, __s2_736, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_736; \
-  uint8x16_t __reint_736 = __rev2_736; \
-  __ret_736 = __noswap_vusdotq_s32(__rev0_736, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_736, __p3_736)), __rev1_736); \
-  __ret_736 = __builtin_shufflevector(__ret_736, __ret_736, 3, 2, 1, 0); \
-  __ret_736; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsudot_laneq_s32(__p0_737, __p1_737, __p2_737, __p3_737) __extension__ ({ \
-  int32x2_t __s0_737 = __p0_737; \
-  int8x8_t __s1_737 = __p1_737; \
-  uint8x16_t __s2_737 = __p2_737; \
-  int32x2_t __ret_737; \
-  uint8x16_t __reint_737 = __s2_737; \
-  __ret_737 = vusdot_s32(__s0_737, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_737, __p3_737)), __s1_737); \
-  __ret_737; \
-})
-#else
-#define vsudot_laneq_s32(__p0_738, __p1_738, __p2_738, __p3_738) __extension__ ({ \
-  int32x2_t __s0_738 = __p0_738; \
-  int8x8_t __s1_738 = __p1_738; \
-  uint8x16_t __s2_738 = __p2_738; \
-  int32x2_t __rev0_738;  __rev0_738 = __builtin_shufflevector(__s0_738, __s0_738, 1, 0); \
-  int8x8_t __rev1_738;  __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x16_t __rev2_738;  __rev2_738 = __builtin_shufflevector(__s2_738, __s2_738, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_738; \
-  uint8x16_t __reint_738 = __rev2_738; \
-  __ret_738 = __noswap_vusdot_s32(__rev0_738, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_738, __p3_738)), __rev1_738); \
-  __ret_738 = __builtin_shufflevector(__ret_738, __ret_738, 1, 0); \
-  __ret_738; \
-})
-#endif
-
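-/* vtrn1/vtrn2 transpose primitives: vtrn1 interleaves the even-numbered
- * lanes of its two inputs (a0,b0,a2,b2,...) and vtrn2 the odd-numbered
- * lanes (a1,b1,a3,b3,...), each as a single compile-time lane shuffle. */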
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  return __ret;
-}
-#else
-__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  return __ret;
-}
-#else
-__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14);
-  return __ret;
-}
-#else
-__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6);
-  return __ret;
-}
-#else
-__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  return __ret;
-}
-#else
-__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  return __ret;
-}
-#else
-__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  return __ret;
-}
-#else
-__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
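-/* vtst ("test bits"): each result lane is all ones when the bitwise AND of
- * the corresponding input lanes is nonzero, and zero otherwise. The final
- * integer argument selects the NEON element type for the generic builtin. */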
-__ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51);
-  return __ret;
-}
-#else
-__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) {
-  uint64x1_t __ret;
-  __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19);
-  return __ret;
-}
-__ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
-  return __ret;
-}
-__ai uint64_t vtstd_s64(int64_t __p0, int64_t __p1) {
-  uint64_t __ret;
-  __ret = (uint64_t) __builtin_neon_vtstd_s64(__p0, __p1);
-  return __ret;
-}
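-/* vuqadd: signed saturating accumulate of an unsigned addend (the AArch64
- * SUQADD instruction), in scalar and per-lane vector forms. */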
-__ai int8_t vuqaddb_s8(int8_t __p0, uint8_t __p1) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1);
-  return __ret;
-}
-__ai int32_t vuqadds_s32(int32_t __p0, uint32_t __p1) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1);
-  return __ret;
-}
-__ai int64_t vuqaddd_s64(int64_t __p0, uint64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1);
-  return __ret;
-}
-__ai int16_t vuqaddh_s16(int16_t __p0, uint16_t __p1) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32);
-  return __ret;
-}
-#else
-__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34);
-  return __ret;
-}
-#else
-__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35);
-  return __ret;
-}
-#else
-__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33);
-  return __ret;
-}
-#else
-__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0);
-  return __ret;
-}
-#else
-__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-__ai int64x1_t vuqadd_s64(int64x1_t __p0, uint64x1_t __p1) {
-  int64x1_t __ret;
-  __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3);
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1);
-  return __ret;
-}
-#else
-__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdotq_laneq_s32(__p0_739, __p1_739, __p2_739, __p3_739) __extension__ ({ \
-  int32x4_t __s0_739 = __p0_739; \
-  uint8x16_t __s1_739 = __p1_739; \
-  int8x16_t __s2_739 = __p2_739; \
-  int32x4_t __ret_739; \
-  int8x16_t __reint_739 = __s2_739; \
-  __ret_739 = vusdotq_s32(__s0_739, __s1_739, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_739, __p3_739))); \
-  __ret_739; \
-})
-#else
-#define vusdotq_laneq_s32(__p0_740, __p1_740, __p2_740, __p3_740) __extension__ ({ \
-  int32x4_t __s0_740 = __p0_740; \
-  uint8x16_t __s1_740 = __p1_740; \
-  int8x16_t __s2_740 = __p2_740; \
-  int32x4_t __rev0_740;  __rev0_740 = __builtin_shufflevector(__s0_740, __s0_740, 3, 2, 1, 0); \
-  uint8x16_t __rev1_740;  __rev1_740 = __builtin_shufflevector(__s1_740, __s1_740, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_740;  __rev2_740 = __builtin_shufflevector(__s2_740, __s2_740, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_740; \
-  int8x16_t __reint_740 = __rev2_740; \
-  __ret_740 = __noswap_vusdotq_s32(__rev0_740, __rev1_740, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_740, __p3_740))); \
-  __ret_740 = __builtin_shufflevector(__ret_740, __ret_740, 3, 2, 1, 0); \
-  __ret_740; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vusdot_laneq_s32(__p0_741, __p1_741, __p2_741, __p3_741) __extension__ ({ \
-  int32x2_t __s0_741 = __p0_741; \
-  uint8x8_t __s1_741 = __p1_741; \
-  int8x16_t __s2_741 = __p2_741; \
-  int32x2_t __ret_741; \
-  int8x16_t __reint_741 = __s2_741; \
-  __ret_741 = vusdot_s32(__s0_741, __s1_741, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_741, __p3_741))); \
-  __ret_741; \
-})
-#else
-#define vusdot_laneq_s32(__p0_742, __p1_742, __p2_742, __p3_742) __extension__ ({ \
-  int32x2_t __s0_742 = __p0_742; \
-  uint8x8_t __s1_742 = __p1_742; \
-  int8x16_t __s2_742 = __p2_742; \
-  int32x2_t __rev0_742;  __rev0_742 = __builtin_shufflevector(__s0_742, __s0_742, 1, 0); \
-  uint8x8_t __rev1_742;  __rev1_742 = __builtin_shufflevector(__s1_742, __s1_742, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int8x16_t __rev2_742;  __rev2_742 = __builtin_shufflevector(__s2_742, __s2_742, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_742; \
-  int8x16_t __reint_742 = __rev2_742; \
-  __ret_742 = __noswap_vusdot_s32(__rev0_742, __rev1_742, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_742, __p3_742))); \
-  __ret_742 = __builtin_shufflevector(__ret_742, __ret_742, 1, 0); \
-  __ret_742; \
-})
-#endif
-
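-/* vuzp1/vuzp2 unzip primitives: vuzp1 gathers the even-indexed lanes of the
- * two inputs in order (a0,a2,...,b0,b2,...) and vuzp2 the odd-indexed
- * lanes. */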
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  return __ret;
-}
-#else
-__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  return __ret;
-}
-#else
-__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  return __ret;
-}
-#else
-__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14);
-  return __ret;
-}
-#else
-__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6);
-  return __ret;
-}
-#else
-__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  return __ret;
-}
-#else
-__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  return __ret;
-}
-#else
-__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  return __ret;
-}
-#else
-__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
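For reference, the lane selections encoded by the four unzip/zip families, shown for a pair of 4-lane vectors a and b (shuffle indices address the 8-lane concatenation a:b):

  vuzp1(a, b) -> {a0, a2, b0, b2}    indices 0, 2, 4, 6  (even lanes)
  vuzp2(a, b) -> {a1, a3, b1, b3}    indices 1, 3, 5, 7  (odd lanes)
  vzip1(a, b) -> {a0, b0, a1, b1}    indices 0, 4, 1, 5  (low halves interleaved)
  vzip2(a, b) -> {a2, b2, a3, b3}    indices 2, 6, 3, 7  (high halves interleaved)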
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  return __ret;
-}
-#else
-__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  return __ret;
-}
-#else
-__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  return __ret;
-}
-#else
-__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11);
-  return __ret;
-}
-#else
-__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 2);
-  return __ret;
-}
-#else
-__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5);
-  return __ret;
-}
-#else
-__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) {
-  poly8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) {
-  poly16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  poly16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  poly16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  return __ret;
-}
-#else
-__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) {
-  poly8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) {
-  poly64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  poly64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  poly64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) {
-  poly16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  poly16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  return __ret;
-}
-#else
-__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  return __ret;
-}
-#else
-__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) {
-  float64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) {
-  float32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  float32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15);
-  return __ret;
-}
-#else
-__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) {
-  float32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  float32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  float32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 1, 3);
-  return __ret;
-}
-#else
-__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7);
-  return __ret;
-}
-#else
-__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
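The selection rule can be checked without NEON hardware, since __builtin_shufflevector also works on generic clang vector types. A self-contained example, assuming a clang host (names are illustrative):

#include <stdio.h>

typedef float f32x2 __attribute__((vector_size(8)));

int main(void) {
  f32x2 a = {1.0f, 2.0f};
  f32x2 b = {3.0f, 4.0f};
  /* Indices address the 4-lane concatenation {a0, a1, b0, b1}. */
  f32x2 uzp1 = __builtin_shufflevector(a, b, 0, 2);  /* vuzp1_f32: {1, 3} */
  f32x2 zip2 = __builtin_shufflevector(a, b, 1, 3);  /* vzip2_f32: {2, 4} */
  printf("%g %g / %g %g\n", uzp1[0], uzp1[1], zip2[0], zip2[1]);
  return 0;
}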
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __ret;
-  __ret = __p0 + vabdq_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __ret;
-  __ret = __rev0 + __noswap_vabdq_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vabdq_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdq_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + vabdq_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdq_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __ret;
-  __ret = __p0 + vabdq_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __ret;
-  __ret = __rev0 + __noswap_vabdq_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vabdq_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdq_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + vabdq_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdq_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __ret;
-  __ret = __p0 + vabd_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __ret;
-  __ret = __rev0 + __noswap_vabd_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __ret;
-  __ret = __p0 + vabd_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint32x2_t __ret;
-  __ret = __rev0 + __noswap_vabd_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __ret;
-  __ret = __p0 + vabd_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint16x4_t __ret;
-  __ret = __rev0 + __noswap_vabd_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __ret;
-  __ret = __p0 + vabd_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __ret;
-  __ret = __rev0 + __noswap_vabd_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __ret;
-  __ret = __p0 + vabd_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int32x2_t __ret;
-  __ret = __rev0 + __noswap_vabd_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __ret;
-  __ret = __p0 + vabd_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int16x4_t __ret;
-  __ret = __rev0 + __noswap_vabd_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
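The vaba family above is absolute-difference-and-accumulate: each output lane is __p0 + |__p1 - __p2| in the element type, composed from vabd and an ordinary vector add (the big-endian bodies call the __noswap_ variants so the already-reversed inputs are not reversed a second time). A per-lane reference in plain C, as a sketch with an illustrative helper name:

#include <stdint.h>

/* One lane of vaba_s32: acc + |x - y|, using unsigned arithmetic so the
 * wraparound matches the modular behaviour of the vector add. */
static int32_t aba_lane_s32(int32_t acc, int32_t x, int32_t y) {
  uint32_t d = x >= y ? (uint32_t)x - (uint32_t)y
                      : (uint32_t)y - (uint32_t)x;   /* |x - y| */
  return (int32_t)((uint32_t)acc + d);
}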
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1))));
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1))));
-  return __ret;
-}
-#else
-__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__rev0, __rev1))));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1))));
-  return __ret;
-}
-#endif
-
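vabdl above and vaddl/vaddw below are the widening forms: vabdl zero-extends the absolute difference into lanes twice as wide (which is why even the signed bodies route through vmovl_u8/u16/u32), vaddl widens both operands before adding, and vaddw widens only the second operand. Per-lane references, again as a plain-C sketch with illustrative names:

#include <stdint.h>

static int16_t abdl_lane_s8(int8_t x, int8_t y) {     /* vabdl_s8 */
  uint8_t d = x >= y ? (uint8_t)(x - y) : (uint8_t)(y - x);
  return (int16_t)(uint16_t)d;       /* zero-extended: the vmovl_u8 step */
}

static int16_t addl_lane_s8(int8_t x, int8_t y) {     /* vaddl_s8 */
  return (int16_t)x + (int16_t)y;    /* widen first, so no 8-bit wrap */
}

static int16_t addw_lane_s8(int16_t acc, int8_t y) {  /* vaddw_s8 */
  return (int16_t)(acc + (int16_t)y);
}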
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_u8(__p0) + vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) {
-  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_u8(__rev0) + __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_u32(__p0) + vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) {
-  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_u32(__rev0) + __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_u16(__p0) + vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) {
-  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_u16(__rev0) + __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_s8(__p0) + vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_s8(__rev0) + __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_s32(__p0) + vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_s32(__rev0) + __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_s16(__p0) + vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_s16(__rev0) + __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 + vmovl_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmovl_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmovl_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 + vmovl_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 + vmovl_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 + vmovl_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vget_lane_f16(__p0_743, __p1_743) __extension__ ({ \
-  float16x4_t __s0_743 = __p0_743; \
-  float16_t __ret_743; \
-float16x4_t __reint_743 = __s0_743; \
-int16_t __reint1_743 = vget_lane_s16(*(int16x4_t *) &__reint_743, __p1_743); \
-  __ret_743 = *(float16_t *) &__reint1_743; \
-  __ret_743; \
-})
-#else
-#define vget_lane_f16(__p0_744, __p1_744) __extension__ ({ \
-  float16x4_t __s0_744 = __p0_744; \
-  float16x4_t __rev0_744;  __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 3, 2, 1, 0); \
-  float16_t __ret_744; \
-float16x4_t __reint_744 = __rev0_744; \
-int16_t __reint1_744 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_744, __p1_744); \
-  __ret_744 = *(float16_t *) &__reint1_744; \
-  __ret_744; \
-})
-#define __noswap_vget_lane_f16(__p0_745, __p1_745) __extension__ ({ \
-  float16x4_t __s0_745 = __p0_745; \
-  float16_t __ret_745; \
-float16x4_t __reint_745 = __s0_745; \
-int16_t __reint1_745 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_745, __p1_745); \
-  __ret_745 = *(float16_t *) &__reint1_745; \
-  __ret_745; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vgetq_lane_f16(__p0_746, __p1_746) __extension__ ({ \
-  float16x8_t __s0_746 = __p0_746; \
-  float16_t __ret_746; \
-float16x8_t __reint_746 = __s0_746; \
-int16_t __reint1_746 = vgetq_lane_s16(*(int16x8_t *) &__reint_746, __p1_746); \
-  __ret_746 = *(float16_t *) &__reint1_746; \
-  __ret_746; \
-})
-#else
-#define vgetq_lane_f16(__p0_747, __p1_747) __extension__ ({ \
-  float16x8_t __s0_747 = __p0_747; \
-  float16x8_t __rev0_747;  __rev0_747 = __builtin_shufflevector(__s0_747, __s0_747, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_747; \
-float16x8_t __reint_747 = __rev0_747; \
-int16_t __reint1_747 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_747, __p1_747); \
-  __ret_747 = *(float16_t *) &__reint1_747; \
-  __ret_747; \
-})
-#define __noswap_vgetq_lane_f16(__p0_748, __p1_748) __extension__ ({ \
-  float16x8_t __s0_748 = __p0_748; \
-  float16_t __ret_748; \
-float16x8_t __reint_748 = __s0_748; \
-int16_t __reint1_748 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_748, __p1_748); \
-  __ret_748 = *(float16_t *) &__reint1_748; \
-  __ret_748; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + vmull_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vmull_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __noswap_vmull_u8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmull_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_u32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmull_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_u16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + vmull_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vmull_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __noswap_vmull_s8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + vmull_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_s32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vmull_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_s16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u32(__p0_749, __p1_749, __p2_749, __p3_749) __extension__ ({ \
-  uint64x2_t __s0_749 = __p0_749; \
-  uint32x2_t __s1_749 = __p1_749; \
-  uint32x2_t __s2_749 = __p2_749; \
-  uint64x2_t __ret_749; \
-  __ret_749 = __s0_749 + vmull_u32(__s1_749, splat_lane_u32(__s2_749, __p3_749)); \
-  __ret_749; \
-})
-#else
-#define vmlal_lane_u32(__p0_750, __p1_750, __p2_750, __p3_750) __extension__ ({ \
-  uint64x2_t __s0_750 = __p0_750; \
-  uint32x2_t __s1_750 = __p1_750; \
-  uint32x2_t __s2_750 = __p2_750; \
-  uint64x2_t __rev0_750;  __rev0_750 = __builtin_shufflevector(__s0_750, __s0_750, 1, 0); \
-  uint32x2_t __rev1_750;  __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 1, 0); \
-  uint32x2_t __rev2_750;  __rev2_750 = __builtin_shufflevector(__s2_750, __s2_750, 1, 0); \
-  uint64x2_t __ret_750; \
-  __ret_750 = __rev0_750 + __noswap_vmull_u32(__rev1_750, __noswap_splat_lane_u32(__rev2_750, __p3_750)); \
-  __ret_750 = __builtin_shufflevector(__ret_750, __ret_750, 1, 0); \
-  __ret_750; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_u16(__p0_751, __p1_751, __p2_751, __p3_751) __extension__ ({ \
-  uint32x4_t __s0_751 = __p0_751; \
-  uint16x4_t __s1_751 = __p1_751; \
-  uint16x4_t __s2_751 = __p2_751; \
-  uint32x4_t __ret_751; \
-  __ret_751 = __s0_751 + vmull_u16(__s1_751, splat_lane_u16(__s2_751, __p3_751)); \
-  __ret_751; \
-})
-#else
-#define vmlal_lane_u16(__p0_752, __p1_752, __p2_752, __p3_752) __extension__ ({ \
-  uint32x4_t __s0_752 = __p0_752; \
-  uint16x4_t __s1_752 = __p1_752; \
-  uint16x4_t __s2_752 = __p2_752; \
-  uint32x4_t __rev0_752;  __rev0_752 = __builtin_shufflevector(__s0_752, __s0_752, 3, 2, 1, 0); \
-  uint16x4_t __rev1_752;  __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 3, 2, 1, 0); \
-  uint16x4_t __rev2_752;  __rev2_752 = __builtin_shufflevector(__s2_752, __s2_752, 3, 2, 1, 0); \
-  uint32x4_t __ret_752; \
-  __ret_752 = __rev0_752 + __noswap_vmull_u16(__rev1_752, __noswap_splat_lane_u16(__rev2_752, __p3_752)); \
-  __ret_752 = __builtin_shufflevector(__ret_752, __ret_752, 3, 2, 1, 0); \
-  __ret_752; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s32(__p0_753, __p1_753, __p2_753, __p3_753) __extension__ ({ \
-  int64x2_t __s0_753 = __p0_753; \
-  int32x2_t __s1_753 = __p1_753; \
-  int32x2_t __s2_753 = __p2_753; \
-  int64x2_t __ret_753; \
-  __ret_753 = __s0_753 + vmull_s32(__s1_753, splat_lane_s32(__s2_753, __p3_753)); \
-  __ret_753; \
-})
-#else
-#define vmlal_lane_s32(__p0_754, __p1_754, __p2_754, __p3_754) __extension__ ({ \
-  int64x2_t __s0_754 = __p0_754; \
-  int32x2_t __s1_754 = __p1_754; \
-  int32x2_t __s2_754 = __p2_754; \
-  int64x2_t __rev0_754;  __rev0_754 = __builtin_shufflevector(__s0_754, __s0_754, 1, 0); \
-  int32x2_t __rev1_754;  __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 1, 0); \
-  int32x2_t __rev2_754;  __rev2_754 = __builtin_shufflevector(__s2_754, __s2_754, 1, 0); \
-  int64x2_t __ret_754; \
-  __ret_754 = __rev0_754 + __noswap_vmull_s32(__rev1_754, __noswap_splat_lane_s32(__rev2_754, __p3_754)); \
-  __ret_754 = __builtin_shufflevector(__ret_754, __ret_754, 1, 0); \
-  __ret_754; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlal_lane_s16(__p0_755, __p1_755, __p2_755, __p3_755) __extension__ ({ \
-  int32x4_t __s0_755 = __p0_755; \
-  int16x4_t __s1_755 = __p1_755; \
-  int16x4_t __s2_755 = __p2_755; \
-  int32x4_t __ret_755; \
-  __ret_755 = __s0_755 + vmull_s16(__s1_755, splat_lane_s16(__s2_755, __p3_755)); \
-  __ret_755; \
-})
-#else
-#define vmlal_lane_s16(__p0_756, __p1_756, __p2_756, __p3_756) __extension__ ({ \
-  int32x4_t __s0_756 = __p0_756; \
-  int16x4_t __s1_756 = __p1_756; \
-  int16x4_t __s2_756 = __p2_756; \
-  int32x4_t __rev0_756;  __rev0_756 = __builtin_shufflevector(__s0_756, __s0_756, 3, 2, 1, 0); \
-  int16x4_t __rev1_756;  __rev1_756 = __builtin_shufflevector(__s1_756, __s1_756, 3, 2, 1, 0); \
-  int16x4_t __rev2_756;  __rev2_756 = __builtin_shufflevector(__s2_756, __s2_756, 3, 2, 1, 0); \
-  int32x4_t __ret_756; \
-  __ret_756 = __rev0_756 + __noswap_vmull_s16(__rev1_756, __noswap_splat_lane_s16(__rev2_756, __p3_756)); \
-  __ret_756 = __builtin_shufflevector(__ret_756, __ret_756, 3, 2, 1, 0); \
-  __ret_756; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - vmull_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 - __noswap_vmull_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 - __noswap_vmull_u8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmull_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_u32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmull_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_u16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - vmull_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 - __noswap_vmull_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 - __noswap_vmull_s8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - vmull_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_s32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - vmull_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_s16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u32(__p0_757, __p1_757, __p2_757, __p3_757) __extension__ ({ \
-  uint64x2_t __s0_757 = __p0_757; \
-  uint32x2_t __s1_757 = __p1_757; \
-  uint32x2_t __s2_757 = __p2_757; \
-  uint64x2_t __ret_757; \
-  __ret_757 = __s0_757 - vmull_u32(__s1_757, splat_lane_u32(__s2_757, __p3_757)); \
-  __ret_757; \
-})
-#else
-#define vmlsl_lane_u32(__p0_758, __p1_758, __p2_758, __p3_758) __extension__ ({ \
-  uint64x2_t __s0_758 = __p0_758; \
-  uint32x2_t __s1_758 = __p1_758; \
-  uint32x2_t __s2_758 = __p2_758; \
-  uint64x2_t __rev0_758;  __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 1, 0); \
-  uint32x2_t __rev1_758;  __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 1, 0); \
-  uint32x2_t __rev2_758;  __rev2_758 = __builtin_shufflevector(__s2_758, __s2_758, 1, 0); \
-  uint64x2_t __ret_758; \
-  __ret_758 = __rev0_758 - __noswap_vmull_u32(__rev1_758, __noswap_splat_lane_u32(__rev2_758, __p3_758)); \
-  __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 1, 0); \
-  __ret_758; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_u16(__p0_759, __p1_759, __p2_759, __p3_759) __extension__ ({ \
-  uint32x4_t __s0_759 = __p0_759; \
-  uint16x4_t __s1_759 = __p1_759; \
-  uint16x4_t __s2_759 = __p2_759; \
-  uint32x4_t __ret_759; \
-  __ret_759 = __s0_759 - vmull_u16(__s1_759, splat_lane_u16(__s2_759, __p3_759)); \
-  __ret_759; \
-})
-#else
-#define vmlsl_lane_u16(__p0_760, __p1_760, __p2_760, __p3_760) __extension__ ({ \
-  uint32x4_t __s0_760 = __p0_760; \
-  uint16x4_t __s1_760 = __p1_760; \
-  uint16x4_t __s2_760 = __p2_760; \
-  uint32x4_t __rev0_760;  __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 3, 2, 1, 0); \
-  uint16x4_t __rev1_760;  __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 3, 2, 1, 0); \
-  uint16x4_t __rev2_760;  __rev2_760 = __builtin_shufflevector(__s2_760, __s2_760, 3, 2, 1, 0); \
-  uint32x4_t __ret_760; \
-  __ret_760 = __rev0_760 - __noswap_vmull_u16(__rev1_760, __noswap_splat_lane_u16(__rev2_760, __p3_760)); \
-  __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 3, 2, 1, 0); \
-  __ret_760; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s32(__p0_761, __p1_761, __p2_761, __p3_761) __extension__ ({ \
-  int64x2_t __s0_761 = __p0_761; \
-  int32x2_t __s1_761 = __p1_761; \
-  int32x2_t __s2_761 = __p2_761; \
-  int64x2_t __ret_761; \
-  __ret_761 = __s0_761 - vmull_s32(__s1_761, splat_lane_s32(__s2_761, __p3_761)); \
-  __ret_761; \
-})
-#else
-#define vmlsl_lane_s32(__p0_762, __p1_762, __p2_762, __p3_762) __extension__ ({ \
-  int64x2_t __s0_762 = __p0_762; \
-  int32x2_t __s1_762 = __p1_762; \
-  int32x2_t __s2_762 = __p2_762; \
-  int64x2_t __rev0_762;  __rev0_762 = __builtin_shufflevector(__s0_762, __s0_762, 1, 0); \
-  int32x2_t __rev1_762;  __rev1_762 = __builtin_shufflevector(__s1_762, __s1_762, 1, 0); \
-  int32x2_t __rev2_762;  __rev2_762 = __builtin_shufflevector(__s2_762, __s2_762, 1, 0); \
-  int64x2_t __ret_762; \
-  __ret_762 = __rev0_762 - __noswap_vmull_s32(__rev1_762, __noswap_splat_lane_s32(__rev2_762, __p3_762)); \
-  __ret_762 = __builtin_shufflevector(__ret_762, __ret_762, 1, 0); \
-  __ret_762; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmlsl_lane_s16(__p0_763, __p1_763, __p2_763, __p3_763) __extension__ ({ \
-  int32x4_t __s0_763 = __p0_763; \
-  int16x4_t __s1_763 = __p1_763; \
-  int16x4_t __s2_763 = __p2_763; \
-  int32x4_t __ret_763; \
-  __ret_763 = __s0_763 - vmull_s16(__s1_763, splat_lane_s16(__s2_763, __p3_763)); \
-  __ret_763; \
-})
-#else
-#define vmlsl_lane_s16(__p0_764, __p1_764, __p2_764, __p3_764) __extension__ ({ \
-  int32x4_t __s0_764 = __p0_764; \
-  int16x4_t __s1_764 = __p1_764; \
-  int16x4_t __s2_764 = __p2_764; \
-  int32x4_t __rev0_764;  __rev0_764 = __builtin_shufflevector(__s0_764, __s0_764, 3, 2, 1, 0); \
-  int16x4_t __rev1_764;  __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 3, 2, 1, 0); \
-  int16x4_t __rev2_764;  __rev2_764 = __builtin_shufflevector(__s2_764, __s2_764, 3, 2, 1, 0); \
-  int32x4_t __ret_764; \
-  __ret_764 = __rev0_764 - __noswap_vmull_s16(__rev1_764, __noswap_splat_lane_s16(__rev2_764, __p3_764)); \
-  __ret_764 = __builtin_shufflevector(__ret_764, __ret_764, 3, 2, 1, 0); \
-  __ret_764; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_u32(__rev1, (uint32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_u16(__rev1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 - __noswap_vmull_s32(__rev1, (int32x2_t) {__p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 - __noswap_vmull_s16(__rev1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2});
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vset_lane_f16(__p0_765, __p1_765, __p2_765) __extension__ ({ \
-  float16_t __s0_765 = __p0_765; \
-  float16x4_t __s1_765 = __p1_765; \
-  float16x4_t __ret_765; \
-float16_t __reint_765 = __s0_765; \
-float16x4_t __reint1_765 = __s1_765; \
-int16x4_t __reint2_765 = vset_lane_s16(*(int16_t *) &__reint_765, *(int16x4_t *) &__reint1_765, __p2_765); \
-  __ret_765 = *(float16x4_t *) &__reint2_765; \
-  __ret_765; \
-})
-#else
-#define vset_lane_f16(__p0_766, __p1_766, __p2_766) __extension__ ({ \
-  float16_t __s0_766 = __p0_766; \
-  float16x4_t __s1_766 = __p1_766; \
-  float16x4_t __rev1_766;  __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 3, 2, 1, 0); \
-  float16x4_t __ret_766; \
-float16_t __reint_766 = __s0_766; \
-float16x4_t __reint1_766 = __rev1_766; \
-int16x4_t __reint2_766 = __noswap_vset_lane_s16(*(int16_t *) &__reint_766, *(int16x4_t *) &__reint1_766, __p2_766); \
-  __ret_766 = *(float16x4_t *) &__reint2_766; \
-  __ret_766 = __builtin_shufflevector(__ret_766, __ret_766, 3, 2, 1, 0); \
-  __ret_766; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsetq_lane_f16(__p0_767, __p1_767, __p2_767) __extension__ ({ \
-  float16_t __s0_767 = __p0_767; \
-  float16x8_t __s1_767 = __p1_767; \
-  float16x8_t __ret_767; \
-float16_t __reint_767 = __s0_767; \
-float16x8_t __reint1_767 = __s1_767; \
-int16x8_t __reint2_767 = vsetq_lane_s16(*(int16_t *) &__reint_767, *(int16x8_t *) &__reint1_767, __p2_767); \
-  __ret_767 = *(float16x8_t *) &__reint2_767; \
-  __ret_767; \
-})
-#else
-#define vsetq_lane_f16(__p0_768, __p1_768, __p2_768) __extension__ ({ \
-  float16_t __s0_768 = __p0_768; \
-  float16x8_t __s1_768 = __p1_768; \
-  float16x8_t __rev1_768;  __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __ret_768; \
-float16_t __reint_768 = __s0_768; \
-float16x8_t __reint1_768 = __rev1_768; \
-int16x8_t __reint2_768 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_768, *(int16x8_t *) &__reint1_768, __p2_768); \
-  __ret_768 = *(float16x8_t *) &__reint2_768; \
-  __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \
-  __ret_768; \
-})
-#endif
-
-#if defined(__ARM_FEATURE_BF16_VECTOR_ARITHMETIC)
-#ifdef __LITTLE_ENDIAN__
-#define vbfmlalbq_lane_f32(__p0_769, __p1_769, __p2_769, __p3_769) __extension__ ({ \
-  float32x4_t __s0_769 = __p0_769; \
-  bfloat16x8_t __s1_769 = __p1_769; \
-  bfloat16x4_t __s2_769 = __p2_769; \
-  float32x4_t __ret_769; \
-  __ret_769 = vbfmlalbq_f32(__s0_769, __s1_769, (bfloat16x8_t) {vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769), vget_lane_bf16(__s2_769, __p3_769)}); \
-  __ret_769; \
-})
-#else
-#define vbfmlalbq_lane_f32(__p0_770, __p1_770, __p2_770, __p3_770) __extension__ ({ \
-  float32x4_t __s0_770 = __p0_770; \
-  bfloat16x8_t __s1_770 = __p1_770; \
-  bfloat16x4_t __s2_770 = __p2_770; \
-  float32x4_t __rev0_770;  __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_770;  __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_770;  __rev2_770 = __builtin_shufflevector(__s2_770, __s2_770, 3, 2, 1, 0); \
-  float32x4_t __ret_770; \
-  __ret_770 = __noswap_vbfmlalbq_f32(__rev0_770, __rev1_770, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770), __noswap_vget_lane_bf16(__rev2_770, __p3_770)}); \
-  __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \
-  __ret_770; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfmlalbq_laneq_f32(__p0_771, __p1_771, __p2_771, __p3_771) __extension__ ({ \
-  float32x4_t __s0_771 = __p0_771; \
-  bfloat16x8_t __s1_771 = __p1_771; \
-  bfloat16x8_t __s2_771 = __p2_771; \
-  float32x4_t __ret_771; \
-  __ret_771 = vbfmlalbq_f32(__s0_771, __s1_771, (bfloat16x8_t) {vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771), vgetq_lane_bf16(__s2_771, __p3_771)}); \
-  __ret_771; \
-})
-#else
-#define vbfmlalbq_laneq_f32(__p0_772, __p1_772, __p2_772, __p3_772) __extension__ ({ \
-  float32x4_t __s0_772 = __p0_772; \
-  bfloat16x8_t __s1_772 = __p1_772; \
-  bfloat16x8_t __s2_772 = __p2_772; \
-  float32x4_t __rev0_772;  __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_772;  __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_772;  __rev2_772 = __builtin_shufflevector(__s2_772, __s2_772, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_772; \
-  __ret_772 = __noswap_vbfmlalbq_f32(__rev0_772, __rev1_772, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772), __noswap_vgetq_lane_bf16(__rev2_772, __p3_772)}); \
-  __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 3, 2, 1, 0); \
-  __ret_772; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfmlaltq_lane_f32(__p0_773, __p1_773, __p2_773, __p3_773) __extension__ ({ \
-  float32x4_t __s0_773 = __p0_773; \
-  bfloat16x8_t __s1_773 = __p1_773; \
-  bfloat16x4_t __s2_773 = __p2_773; \
-  float32x4_t __ret_773; \
-  __ret_773 = vbfmlaltq_f32(__s0_773, __s1_773, (bfloat16x8_t) {vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773), vget_lane_bf16(__s2_773, __p3_773)}); \
-  __ret_773; \
-})
-#else
-#define vbfmlaltq_lane_f32(__p0_774, __p1_774, __p2_774, __p3_774) __extension__ ({ \
-  float32x4_t __s0_774 = __p0_774; \
-  bfloat16x8_t __s1_774 = __p1_774; \
-  bfloat16x4_t __s2_774 = __p2_774; \
-  float32x4_t __rev0_774;  __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_774;  __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x4_t __rev2_774;  __rev2_774 = __builtin_shufflevector(__s2_774, __s2_774, 3, 2, 1, 0); \
-  float32x4_t __ret_774; \
-  __ret_774 = __noswap_vbfmlaltq_f32(__rev0_774, __rev1_774, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774), __noswap_vget_lane_bf16(__rev2_774, __p3_774)}); \
-  __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 3, 2, 1, 0); \
-  __ret_774; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vbfmlaltq_laneq_f32(__p0_775, __p1_775, __p2_775, __p3_775) __extension__ ({ \
-  float32x4_t __s0_775 = __p0_775; \
-  bfloat16x8_t __s1_775 = __p1_775; \
-  bfloat16x8_t __s2_775 = __p2_775; \
-  float32x4_t __ret_775; \
-  __ret_775 = vbfmlaltq_f32(__s0_775, __s1_775, (bfloat16x8_t) {vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775), vgetq_lane_bf16(__s2_775, __p3_775)}); \
-  __ret_775; \
-})
-#else
-#define vbfmlaltq_laneq_f32(__p0_776, __p1_776, __p2_776, __p3_776) __extension__ ({ \
-  float32x4_t __s0_776 = __p0_776; \
-  bfloat16x8_t __s1_776 = __p1_776; \
-  bfloat16x8_t __s2_776 = __p2_776; \
-  float32x4_t __rev0_776;  __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 3, 2, 1, 0); \
-  bfloat16x8_t __rev1_776;  __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 7, 6, 5, 4, 3, 2, 1, 0); \
-  bfloat16x8_t __rev2_776;  __rev2_776 = __builtin_shufflevector(__s2_776, __s2_776, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_776; \
-  __ret_776 = __noswap_vbfmlaltq_f32(__rev0_776, __rev1_776, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776), __noswap_vgetq_lane_bf16(__rev2_776, __p3_776)}); \
-  __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 3, 2, 1, 0); \
-  __ret_776; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = vcvt_f32_bf16(vget_high_bf16(__p0));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) {
-  float32x4_t __ret;
-  __ret = vcvt_f32_bf16(vget_low_bf16(__p0));
-  return __ret;
-}
-#else
-__ai float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) {
-  bfloat16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  float32x4_t __ret;
-  __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16FML) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_high_f16(__p0_777, __p1_777, __p2_777, __p3_777) __extension__ ({ \
-  float32x4_t __s0_777 = __p0_777; \
-  float16x8_t __s1_777 = __p1_777; \
-  float16x4_t __s2_777 = __p2_777; \
-  float32x4_t __ret_777; \
-  __ret_777 = vfmlalq_high_f16(__s0_777, __s1_777, (float16x8_t) {vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777), vget_lane_f16(__s2_777, __p3_777)}); \
-  __ret_777; \
-})
-#else
-#define vfmlalq_lane_high_f16(__p0_778, __p1_778, __p2_778, __p3_778) __extension__ ({ \
-  float32x4_t __s0_778 = __p0_778; \
-  float16x8_t __s1_778 = __p1_778; \
-  float16x4_t __s2_778 = __p2_778; \
-  float32x4_t __rev0_778;  __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 3, 2, 1, 0); \
-  float16x8_t __rev1_778;  __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_778;  __rev2_778 = __builtin_shufflevector(__s2_778, __s2_778, 3, 2, 1, 0); \
-  float32x4_t __ret_778; \
-  __ret_778 = __noswap_vfmlalq_high_f16(__rev0_778, __rev1_778, (float16x8_t) {__noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778), __noswap_vget_lane_f16(__rev2_778, __p3_778)}); \
-  __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 3, 2, 1, 0); \
-  __ret_778; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_high_f16(__p0_779, __p1_779, __p2_779, __p3_779) __extension__ ({ \
-  float32x2_t __s0_779 = __p0_779; \
-  float16x4_t __s1_779 = __p1_779; \
-  float16x4_t __s2_779 = __p2_779; \
-  float32x2_t __ret_779; \
-  __ret_779 = vfmlal_high_f16(__s0_779, __s1_779, (float16x4_t) {vget_lane_f16(__s2_779, __p3_779), vget_lane_f16(__s2_779, __p3_779), vget_lane_f16(__s2_779, __p3_779), vget_lane_f16(__s2_779, __p3_779)}); \
-  __ret_779; \
-})
-#else
-#define vfmlal_lane_high_f16(__p0_780, __p1_780, __p2_780, __p3_780) __extension__ ({ \
-  float32x2_t __s0_780 = __p0_780; \
-  float16x4_t __s1_780 = __p1_780; \
-  float16x4_t __s2_780 = __p2_780; \
-  float32x2_t __rev0_780;  __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 1, 0); \
-  float16x4_t __rev1_780;  __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 3, 2, 1, 0); \
-  float16x4_t __rev2_780;  __rev2_780 = __builtin_shufflevector(__s2_780, __s2_780, 3, 2, 1, 0); \
-  float32x2_t __ret_780; \
-  __ret_780 = __noswap_vfmlal_high_f16(__rev0_780, __rev1_780, (float16x4_t) {__noswap_vget_lane_f16(__rev2_780, __p3_780), __noswap_vget_lane_f16(__rev2_780, __p3_780), __noswap_vget_lane_f16(__rev2_780, __p3_780), __noswap_vget_lane_f16(__rev2_780, __p3_780)}); \
-  __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 1, 0); \
-  __ret_780; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_lane_low_f16(__p0_781, __p1_781, __p2_781, __p3_781) __extension__ ({ \
-  float32x4_t __s0_781 = __p0_781; \
-  float16x8_t __s1_781 = __p1_781; \
-  float16x4_t __s2_781 = __p2_781; \
-  float32x4_t __ret_781; \
-  __ret_781 = vfmlalq_low_f16(__s0_781, __s1_781, (float16x8_t) {vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781), vget_lane_f16(__s2_781, __p3_781)}); \
-  __ret_781; \
-})
-#else
-#define vfmlalq_lane_low_f16(__p0_782, __p1_782, __p2_782, __p3_782) __extension__ ({ \
-  float32x4_t __s0_782 = __p0_782; \
-  float16x8_t __s1_782 = __p1_782; \
-  float16x4_t __s2_782 = __p2_782; \
-  float32x4_t __rev0_782;  __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 3, 2, 1, 0); \
-  float16x8_t __rev1_782;  __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_782;  __rev2_782 = __builtin_shufflevector(__s2_782, __s2_782, 3, 2, 1, 0); \
-  float32x4_t __ret_782; \
-  __ret_782 = __noswap_vfmlalq_low_f16(__rev0_782, __rev1_782, (float16x8_t) {__noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782), __noswap_vget_lane_f16(__rev2_782, __p3_782)}); \
-  __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 3, 2, 1, 0); \
-  __ret_782; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_lane_low_f16(__p0_783, __p1_783, __p2_783, __p3_783) __extension__ ({ \
-  float32x2_t __s0_783 = __p0_783; \
-  float16x4_t __s1_783 = __p1_783; \
-  float16x4_t __s2_783 = __p2_783; \
-  float32x2_t __ret_783; \
-  __ret_783 = vfmlal_low_f16(__s0_783, __s1_783, (float16x4_t) {vget_lane_f16(__s2_783, __p3_783), vget_lane_f16(__s2_783, __p3_783), vget_lane_f16(__s2_783, __p3_783), vget_lane_f16(__s2_783, __p3_783)}); \
-  __ret_783; \
-})
-#else
-#define vfmlal_lane_low_f16(__p0_784, __p1_784, __p2_784, __p3_784) __extension__ ({ \
-  float32x2_t __s0_784 = __p0_784; \
-  float16x4_t __s1_784 = __p1_784; \
-  float16x4_t __s2_784 = __p2_784; \
-  float32x2_t __rev0_784;  __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 1, 0); \
-  float16x4_t __rev1_784;  __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 3, 2, 1, 0); \
-  float16x4_t __rev2_784;  __rev2_784 = __builtin_shufflevector(__s2_784, __s2_784, 3, 2, 1, 0); \
-  float32x2_t __ret_784; \
-  __ret_784 = __noswap_vfmlal_low_f16(__rev0_784, __rev1_784, (float16x4_t) {__noswap_vget_lane_f16(__rev2_784, __p3_784), __noswap_vget_lane_f16(__rev2_784, __p3_784), __noswap_vget_lane_f16(__rev2_784, __p3_784), __noswap_vget_lane_f16(__rev2_784, __p3_784)}); \
-  __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 1, 0); \
-  __ret_784; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_high_f16(__p0_785, __p1_785, __p2_785, __p3_785) __extension__ ({ \
-  float32x4_t __s0_785 = __p0_785; \
-  float16x8_t __s1_785 = __p1_785; \
-  float16x8_t __s2_785 = __p2_785; \
-  float32x4_t __ret_785; \
-  __ret_785 = vfmlalq_high_f16(__s0_785, __s1_785, (float16x8_t) {vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785), vgetq_lane_f16(__s2_785, __p3_785)}); \
-  __ret_785; \
-})
-#else
-#define vfmlalq_laneq_high_f16(__p0_786, __p1_786, __p2_786, __p3_786) __extension__ ({ \
-  float32x4_t __s0_786 = __p0_786; \
-  float16x8_t __s1_786 = __p1_786; \
-  float16x8_t __s2_786 = __p2_786; \
-  float32x4_t __rev0_786;  __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 3, 2, 1, 0); \
-  float16x8_t __rev1_786;  __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_786;  __rev2_786 = __builtin_shufflevector(__s2_786, __s2_786, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_786; \
-  __ret_786 = __noswap_vfmlalq_high_f16(__rev0_786, __rev1_786, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786), __noswap_vgetq_lane_f16(__rev2_786, __p3_786)}); \
-  __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 3, 2, 1, 0); \
-  __ret_786; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_high_f16(__p0_787, __p1_787, __p2_787, __p3_787) __extension__ ({ \
-  float32x2_t __s0_787 = __p0_787; \
-  float16x4_t __s1_787 = __p1_787; \
-  float16x8_t __s2_787 = __p2_787; \
-  float32x2_t __ret_787; \
-  __ret_787 = vfmlal_high_f16(__s0_787, __s1_787, (float16x4_t) {vgetq_lane_f16(__s2_787, __p3_787), vgetq_lane_f16(__s2_787, __p3_787), vgetq_lane_f16(__s2_787, __p3_787), vgetq_lane_f16(__s2_787, __p3_787)}); \
-  __ret_787; \
-})
-#else
-#define vfmlal_laneq_high_f16(__p0_788, __p1_788, __p2_788, __p3_788) __extension__ ({ \
-  float32x2_t __s0_788 = __p0_788; \
-  float16x4_t __s1_788 = __p1_788; \
-  float16x8_t __s2_788 = __p2_788; \
-  float32x2_t __rev0_788;  __rev0_788 = __builtin_shufflevector(__s0_788, __s0_788, 1, 0); \
-  float16x4_t __rev1_788;  __rev1_788 = __builtin_shufflevector(__s1_788, __s1_788, 3, 2, 1, 0); \
-  float16x8_t __rev2_788;  __rev2_788 = __builtin_shufflevector(__s2_788, __s2_788, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_788; \
-  __ret_788 = __noswap_vfmlal_high_f16(__rev0_788, __rev1_788, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_788, __p3_788), __noswap_vgetq_lane_f16(__rev2_788, __p3_788), __noswap_vgetq_lane_f16(__rev2_788, __p3_788), __noswap_vgetq_lane_f16(__rev2_788, __p3_788)}); \
-  __ret_788 = __builtin_shufflevector(__ret_788, __ret_788, 1, 0); \
-  __ret_788; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlalq_laneq_low_f16(__p0_789, __p1_789, __p2_789, __p3_789) __extension__ ({ \
-  float32x4_t __s0_789 = __p0_789; \
-  float16x8_t __s1_789 = __p1_789; \
-  float16x8_t __s2_789 = __p2_789; \
-  float32x4_t __ret_789; \
-  __ret_789 = vfmlalq_low_f16(__s0_789, __s1_789, (float16x8_t) {vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789), vgetq_lane_f16(__s2_789, __p3_789)}); \
-  __ret_789; \
-})
-#else
-#define vfmlalq_laneq_low_f16(__p0_790, __p1_790, __p2_790, __p3_790) __extension__ ({ \
-  float32x4_t __s0_790 = __p0_790; \
-  float16x8_t __s1_790 = __p1_790; \
-  float16x8_t __s2_790 = __p2_790; \
-  float32x4_t __rev0_790;  __rev0_790 = __builtin_shufflevector(__s0_790, __s0_790, 3, 2, 1, 0); \
-  float16x8_t __rev1_790;  __rev1_790 = __builtin_shufflevector(__s1_790, __s1_790, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_790;  __rev2_790 = __builtin_shufflevector(__s2_790, __s2_790, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_790; \
-  __ret_790 = __noswap_vfmlalq_low_f16(__rev0_790, __rev1_790, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790), __noswap_vgetq_lane_f16(__rev2_790, __p3_790)}); \
-  __ret_790 = __builtin_shufflevector(__ret_790, __ret_790, 3, 2, 1, 0); \
-  __ret_790; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlal_laneq_low_f16(__p0_791, __p1_791, __p2_791, __p3_791) __extension__ ({ \
-  float32x2_t __s0_791 = __p0_791; \
-  float16x4_t __s1_791 = __p1_791; \
-  float16x8_t __s2_791 = __p2_791; \
-  float32x2_t __ret_791; \
-  __ret_791 = vfmlal_low_f16(__s0_791, __s1_791, (float16x4_t) {vgetq_lane_f16(__s2_791, __p3_791), vgetq_lane_f16(__s2_791, __p3_791), vgetq_lane_f16(__s2_791, __p3_791), vgetq_lane_f16(__s2_791, __p3_791)}); \
-  __ret_791; \
-})
-#else
-#define vfmlal_laneq_low_f16(__p0_792, __p1_792, __p2_792, __p3_792) __extension__ ({ \
-  float32x2_t __s0_792 = __p0_792; \
-  float16x4_t __s1_792 = __p1_792; \
-  float16x8_t __s2_792 = __p2_792; \
-  float32x2_t __rev0_792;  __rev0_792 = __builtin_shufflevector(__s0_792, __s0_792, 1, 0); \
-  float16x4_t __rev1_792;  __rev1_792 = __builtin_shufflevector(__s1_792, __s1_792, 3, 2, 1, 0); \
-  float16x8_t __rev2_792;  __rev2_792 = __builtin_shufflevector(__s2_792, __s2_792, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_792; \
-  __ret_792 = __noswap_vfmlal_low_f16(__rev0_792, __rev1_792, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_792, __p3_792), __noswap_vgetq_lane_f16(__rev2_792, __p3_792), __noswap_vgetq_lane_f16(__rev2_792, __p3_792), __noswap_vgetq_lane_f16(__rev2_792, __p3_792)}); \
-  __ret_792 = __builtin_shufflevector(__ret_792, __ret_792, 1, 0); \
-  __ret_792; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_high_f16(__p0_793, __p1_793, __p2_793, __p3_793) __extension__ ({ \
-  float32x4_t __s0_793 = __p0_793; \
-  float16x8_t __s1_793 = __p1_793; \
-  float16x4_t __s2_793 = __p2_793; \
-  float32x4_t __ret_793; \
-  __ret_793 = vfmlslq_high_f16(__s0_793, __s1_793, (float16x8_t) {vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793), vget_lane_f16(__s2_793, __p3_793)}); \
-  __ret_793; \
-})
-#else
-#define vfmlslq_lane_high_f16(__p0_794, __p1_794, __p2_794, __p3_794) __extension__ ({ \
-  float32x4_t __s0_794 = __p0_794; \
-  float16x8_t __s1_794 = __p1_794; \
-  float16x4_t __s2_794 = __p2_794; \
-  float32x4_t __rev0_794;  __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 3, 2, 1, 0); \
-  float16x8_t __rev1_794;  __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_794;  __rev2_794 = __builtin_shufflevector(__s2_794, __s2_794, 3, 2, 1, 0); \
-  float32x4_t __ret_794; \
-  __ret_794 = __noswap_vfmlslq_high_f16(__rev0_794, __rev1_794, (float16x8_t) {__noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794), __noswap_vget_lane_f16(__rev2_794, __p3_794)}); \
-  __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 3, 2, 1, 0); \
-  __ret_794; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_high_f16(__p0_795, __p1_795, __p2_795, __p3_795) __extension__ ({ \
-  float32x2_t __s0_795 = __p0_795; \
-  float16x4_t __s1_795 = __p1_795; \
-  float16x4_t __s2_795 = __p2_795; \
-  float32x2_t __ret_795; \
-  __ret_795 = vfmlsl_high_f16(__s0_795, __s1_795, (float16x4_t) {vget_lane_f16(__s2_795, __p3_795), vget_lane_f16(__s2_795, __p3_795), vget_lane_f16(__s2_795, __p3_795), vget_lane_f16(__s2_795, __p3_795)}); \
-  __ret_795; \
-})
-#else
-#define vfmlsl_lane_high_f16(__p0_796, __p1_796, __p2_796, __p3_796) __extension__ ({ \
-  float32x2_t __s0_796 = __p0_796; \
-  float16x4_t __s1_796 = __p1_796; \
-  float16x4_t __s2_796 = __p2_796; \
-  float32x2_t __rev0_796;  __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 1, 0); \
-  float16x4_t __rev1_796;  __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 3, 2, 1, 0); \
-  float16x4_t __rev2_796;  __rev2_796 = __builtin_shufflevector(__s2_796, __s2_796, 3, 2, 1, 0); \
-  float32x2_t __ret_796; \
-  __ret_796 = __noswap_vfmlsl_high_f16(__rev0_796, __rev1_796, (float16x4_t) {__noswap_vget_lane_f16(__rev2_796, __p3_796), __noswap_vget_lane_f16(__rev2_796, __p3_796), __noswap_vget_lane_f16(__rev2_796, __p3_796), __noswap_vget_lane_f16(__rev2_796, __p3_796)}); \
-  __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 1, 0); \
-  __ret_796; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_lane_low_f16(__p0_797, __p1_797, __p2_797, __p3_797) __extension__ ({ \
-  float32x4_t __s0_797 = __p0_797; \
-  float16x8_t __s1_797 = __p1_797; \
-  float16x4_t __s2_797 = __p2_797; \
-  float32x4_t __ret_797; \
-  __ret_797 = vfmlslq_low_f16(__s0_797, __s1_797, (float16x8_t) {vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797), vget_lane_f16(__s2_797, __p3_797)}); \
-  __ret_797; \
-})
-#else
-#define vfmlslq_lane_low_f16(__p0_798, __p1_798, __p2_798, __p3_798) __extension__ ({ \
-  float32x4_t __s0_798 = __p0_798; \
-  float16x8_t __s1_798 = __p1_798; \
-  float16x4_t __s2_798 = __p2_798; \
-  float32x4_t __rev0_798;  __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 3, 2, 1, 0); \
-  float16x8_t __rev1_798;  __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x4_t __rev2_798;  __rev2_798 = __builtin_shufflevector(__s2_798, __s2_798, 3, 2, 1, 0); \
-  float32x4_t __ret_798; \
-  __ret_798 = __noswap_vfmlslq_low_f16(__rev0_798, __rev1_798, (float16x8_t) {__noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798), __noswap_vget_lane_f16(__rev2_798, __p3_798)}); \
-  __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 3, 2, 1, 0); \
-  __ret_798; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_lane_low_f16(__p0_799, __p1_799, __p2_799, __p3_799) __extension__ ({ \
-  float32x2_t __s0_799 = __p0_799; \
-  float16x4_t __s1_799 = __p1_799; \
-  float16x4_t __s2_799 = __p2_799; \
-  float32x2_t __ret_799; \
-  __ret_799 = vfmlsl_low_f16(__s0_799, __s1_799, (float16x4_t) {vget_lane_f16(__s2_799, __p3_799), vget_lane_f16(__s2_799, __p3_799), vget_lane_f16(__s2_799, __p3_799), vget_lane_f16(__s2_799, __p3_799)}); \
-  __ret_799; \
-})
-#else
-#define vfmlsl_lane_low_f16(__p0_800, __p1_800, __p2_800, __p3_800) __extension__ ({ \
-  float32x2_t __s0_800 = __p0_800; \
-  float16x4_t __s1_800 = __p1_800; \
-  float16x4_t __s2_800 = __p2_800; \
-  float32x2_t __rev0_800;  __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 1, 0); \
-  float16x4_t __rev1_800;  __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 3, 2, 1, 0); \
-  float16x4_t __rev2_800;  __rev2_800 = __builtin_shufflevector(__s2_800, __s2_800, 3, 2, 1, 0); \
-  float32x2_t __ret_800; \
-  __ret_800 = __noswap_vfmlsl_low_f16(__rev0_800, __rev1_800, (float16x4_t) {__noswap_vget_lane_f16(__rev2_800, __p3_800), __noswap_vget_lane_f16(__rev2_800, __p3_800), __noswap_vget_lane_f16(__rev2_800, __p3_800), __noswap_vget_lane_f16(__rev2_800, __p3_800)}); \
-  __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 1, 0); \
-  __ret_800; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_high_f16(__p0_801, __p1_801, __p2_801, __p3_801) __extension__ ({ \
-  float32x4_t __s0_801 = __p0_801; \
-  float16x8_t __s1_801 = __p1_801; \
-  float16x8_t __s2_801 = __p2_801; \
-  float32x4_t __ret_801; \
-  __ret_801 = vfmlslq_high_f16(__s0_801, __s1_801, (float16x8_t) {vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801), vgetq_lane_f16(__s2_801, __p3_801)}); \
-  __ret_801; \
-})
-#else
-#define vfmlslq_laneq_high_f16(__p0_802, __p1_802, __p2_802, __p3_802) __extension__ ({ \
-  float32x4_t __s0_802 = __p0_802; \
-  float16x8_t __s1_802 = __p1_802; \
-  float16x8_t __s2_802 = __p2_802; \
-  float32x4_t __rev0_802;  __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 3, 2, 1, 0); \
-  float16x8_t __rev1_802;  __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_802;  __rev2_802 = __builtin_shufflevector(__s2_802, __s2_802, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_802; \
-  __ret_802 = __noswap_vfmlslq_high_f16(__rev0_802, __rev1_802, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802), __noswap_vgetq_lane_f16(__rev2_802, __p3_802)}); \
-  __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 3, 2, 1, 0); \
-  __ret_802; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_high_f16(__p0_803, __p1_803, __p2_803, __p3_803) __extension__ ({ \
-  float32x2_t __s0_803 = __p0_803; \
-  float16x4_t __s1_803 = __p1_803; \
-  float16x8_t __s2_803 = __p2_803; \
-  float32x2_t __ret_803; \
-  __ret_803 = vfmlsl_high_f16(__s0_803, __s1_803, (float16x4_t) {vgetq_lane_f16(__s2_803, __p3_803), vgetq_lane_f16(__s2_803, __p3_803), vgetq_lane_f16(__s2_803, __p3_803), vgetq_lane_f16(__s2_803, __p3_803)}); \
-  __ret_803; \
-})
-#else
-#define vfmlsl_laneq_high_f16(__p0_804, __p1_804, __p2_804, __p3_804) __extension__ ({ \
-  float32x2_t __s0_804 = __p0_804; \
-  float16x4_t __s1_804 = __p1_804; \
-  float16x8_t __s2_804 = __p2_804; \
-  float32x2_t __rev0_804;  __rev0_804 = __builtin_shufflevector(__s0_804, __s0_804, 1, 0); \
-  float16x4_t __rev1_804;  __rev1_804 = __builtin_shufflevector(__s1_804, __s1_804, 3, 2, 1, 0); \
-  float16x8_t __rev2_804;  __rev2_804 = __builtin_shufflevector(__s2_804, __s2_804, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_804; \
-  __ret_804 = __noswap_vfmlsl_high_f16(__rev0_804, __rev1_804, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_804, __p3_804), __noswap_vgetq_lane_f16(__rev2_804, __p3_804), __noswap_vgetq_lane_f16(__rev2_804, __p3_804), __noswap_vgetq_lane_f16(__rev2_804, __p3_804)}); \
-  __ret_804 = __builtin_shufflevector(__ret_804, __ret_804, 1, 0); \
-  __ret_804; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlslq_laneq_low_f16(__p0_805, __p1_805, __p2_805, __p3_805) __extension__ ({ \
-  float32x4_t __s0_805 = __p0_805; \
-  float16x8_t __s1_805 = __p1_805; \
-  float16x8_t __s2_805 = __p2_805; \
-  float32x4_t __ret_805; \
-  __ret_805 = vfmlslq_low_f16(__s0_805, __s1_805, (float16x8_t) {vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805), vgetq_lane_f16(__s2_805, __p3_805)}); \
-  __ret_805; \
-})
-#else
-#define vfmlslq_laneq_low_f16(__p0_806, __p1_806, __p2_806, __p3_806) __extension__ ({ \
-  float32x4_t __s0_806 = __p0_806; \
-  float16x8_t __s1_806 = __p1_806; \
-  float16x8_t __s2_806 = __p2_806; \
-  float32x4_t __rev0_806;  __rev0_806 = __builtin_shufflevector(__s0_806, __s0_806, 3, 2, 1, 0); \
-  float16x8_t __rev1_806;  __rev1_806 = __builtin_shufflevector(__s1_806, __s1_806, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16x8_t __rev2_806;  __rev2_806 = __builtin_shufflevector(__s2_806, __s2_806, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x4_t __ret_806; \
-  __ret_806 = __noswap_vfmlslq_low_f16(__rev0_806, __rev1_806, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806), __noswap_vgetq_lane_f16(__rev2_806, __p3_806)}); \
-  __ret_806 = __builtin_shufflevector(__ret_806, __ret_806, 3, 2, 1, 0); \
-  __ret_806; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vfmlsl_laneq_low_f16(__p0_807, __p1_807, __p2_807, __p3_807) __extension__ ({ \
-  float32x2_t __s0_807 = __p0_807; \
-  float16x4_t __s1_807 = __p1_807; \
-  float16x8_t __s2_807 = __p2_807; \
-  float32x2_t __ret_807; \
-  __ret_807 = vfmlsl_low_f16(__s0_807, __s1_807, (float16x4_t) {vgetq_lane_f16(__s2_807, __p3_807), vgetq_lane_f16(__s2_807, __p3_807), vgetq_lane_f16(__s2_807, __p3_807), vgetq_lane_f16(__s2_807, __p3_807)}); \
-  __ret_807; \
-})
-#else
-#define vfmlsl_laneq_low_f16(__p0_808, __p1_808, __p2_808, __p3_808) __extension__ ({ \
-  float32x2_t __s0_808 = __p0_808; \
-  float16x4_t __s1_808 = __p1_808; \
-  float16x8_t __s2_808 = __p2_808; \
-  float32x2_t __rev0_808;  __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 1, 0); \
-  float16x4_t __rev1_808;  __rev1_808 = __builtin_shufflevector(__s1_808, __s1_808, 3, 2, 1, 0); \
-  float16x8_t __rev2_808;  __rev2_808 = __builtin_shufflevector(__s2_808, __s2_808, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float32x2_t __ret_808; \
-  __ret_808 = __noswap_vfmlsl_low_f16(__rev0_808, __rev1_808, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_808, __p3_808), __noswap_vgetq_lane_f16(__rev2_808, __p3_808), __noswap_vgetq_lane_f16(__rev2_808, __p3_808), __noswap_vgetq_lane_f16(__rev2_808, __p3_808)}); \
-  __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 1, 0); \
-  __ret_808; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-#define vmulh_lane_f16(__p0_809, __p1_809, __p2_809) __extension__ ({ \
-  float16_t __s0_809 = __p0_809; \
-  float16x4_t __s1_809 = __p1_809; \
-  float16_t __ret_809; \
-  __ret_809 = __s0_809 * vget_lane_f16(__s1_809, __p2_809); \
-  __ret_809; \
-})
-#else
-#define vmulh_lane_f16(__p0_810, __p1_810, __p2_810) __extension__ ({ \
-  float16_t __s0_810 = __p0_810; \
-  float16x4_t __s1_810 = __p1_810; \
-  float16x4_t __rev1_810;  __rev1_810 = __builtin_shufflevector(__s1_810, __s1_810, 3, 2, 1, 0); \
-  float16_t __ret_810; \
-  __ret_810 = __s0_810 * __noswap_vget_lane_f16(__rev1_810, __p2_810); \
-  __ret_810; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vmulh_laneq_f16(__p0_811, __p1_811, __p2_811) __extension__ ({ \
-  float16_t __s0_811 = __p0_811; \
-  float16x8_t __s1_811 = __p1_811; \
-  float16_t __ret_811; \
-  __ret_811 = __s0_811 * vgetq_lane_f16(__s1_811, __p2_811); \
-  __ret_811; \
-})
-#else
-#define vmulh_laneq_f16(__p0_812, __p1_812, __p2_812) __extension__ ({ \
-  float16_t __s0_812 = __p0_812; \
-  float16x8_t __s1_812 = __p1_812; \
-  float16x8_t __rev1_812;  __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 7, 6, 5, 4, 3, 2, 1, 0); \
-  float16_t __ret_812; \
-  __ret_812 = __s0_812 * __noswap_vgetq_lane_f16(__rev1_812, __p2_812); \
-  __ret_812; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_MATMUL_INT8)
-#ifdef __LITTLE_ENDIAN__
-#define vsudotq_lane_s32(__p0_813, __p1_813, __p2_813, __p3_813) __extension__ ({ \
-  int32x4_t __s0_813 = __p0_813; \
-  int8x16_t __s1_813 = __p1_813; \
-  uint8x8_t __s2_813 = __p2_813; \
-  int32x4_t __ret_813; \
-  uint8x8_t __reint_813 = __s2_813; \
-  __ret_813 = vusdotq_s32(__s0_813, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_813, __p3_813)), __s1_813); \
-  __ret_813; \
-})
-#else
-#define vsudotq_lane_s32(__p0_814, __p1_814, __p2_814, __p3_814) __extension__ ({ \
-  int32x4_t __s0_814 = __p0_814; \
-  int8x16_t __s1_814 = __p1_814; \
-  uint8x8_t __s2_814 = __p2_814; \
-  int32x4_t __rev0_814;  __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 3, 2, 1, 0); \
-  int8x16_t __rev1_814;  __rev1_814 = __builtin_shufflevector(__s1_814, __s1_814, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_814;  __rev2_814 = __builtin_shufflevector(__s2_814, __s2_814, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x4_t __ret_814; \
-  uint8x8_t __reint_814 = __rev2_814; \
-  __ret_814 = __noswap_vusdotq_s32(__rev0_814, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_814, __p3_814)), __rev1_814); \
-  __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 3, 2, 1, 0); \
-  __ret_814; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vsudot_lane_s32(__p0_815, __p1_815, __p2_815, __p3_815) __extension__ ({ \
-  int32x2_t __s0_815 = __p0_815; \
-  int8x8_t __s1_815 = __p1_815; \
-  uint8x8_t __s2_815 = __p2_815; \
-  int32x2_t __ret_815; \
-  uint8x8_t __reint_815 = __s2_815; \
-  __ret_815 = vusdot_s32(__s0_815, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_815, __p3_815)), __s1_815); \
-  __ret_815; \
-})
-#else
-#define vsudot_lane_s32(__p0_816, __p1_816, __p2_816, __p3_816) __extension__ ({ \
-  int32x2_t __s0_816 = __p0_816; \
-  int8x8_t __s1_816 = __p1_816; \
-  uint8x8_t __s2_816 = __p2_816; \
-  int32x2_t __rev0_816;  __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 1, 0); \
-  int8x8_t __rev1_816;  __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 7, 6, 5, 4, 3, 2, 1, 0); \
-  uint8x8_t __rev2_816;  __rev2_816 = __builtin_shufflevector(__s2_816, __s2_816, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int32x2_t __ret_816; \
-  uint8x8_t __reint_816 = __rev2_816; \
-  __ret_816 = __noswap_vusdot_s32(__rev0_816, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_816, __p3_816)), __rev1_816); \
-  __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 1, 0); \
-  __ret_816; \
-})
-#endif
-
-#endif
-#if defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)
-__ai int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqadds_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqaddh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_lane_s32(__p0_817, __p1_817, __p2_817, __p3_817) __extension__ ({ \
-  int32_t __s0_817 = __p0_817; \
-  int32_t __s1_817 = __p1_817; \
-  int32x2_t __s2_817 = __p2_817; \
-  int32_t __ret_817; \
-  __ret_817 = vqadds_s32(__s0_817, vqrdmulhs_s32(__s1_817, vget_lane_s32(__s2_817, __p3_817))); \
-  __ret_817; \
-})
-#else
-#define vqrdmlahs_lane_s32(__p0_818, __p1_818, __p2_818, __p3_818) __extension__ ({ \
-  int32_t __s0_818 = __p0_818; \
-  int32_t __s1_818 = __p1_818; \
-  int32x2_t __s2_818 = __p2_818; \
-  int32x2_t __rev2_818;  __rev2_818 = __builtin_shufflevector(__s2_818, __s2_818, 1, 0); \
-  int32_t __ret_818; \
-  __ret_818 = vqadds_s32(__s0_818, vqrdmulhs_s32(__s1_818, __noswap_vget_lane_s32(__rev2_818, __p3_818))); \
-  __ret_818; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_lane_s16(__p0_819, __p1_819, __p2_819, __p3_819) __extension__ ({ \
-  int16_t __s0_819 = __p0_819; \
-  int16_t __s1_819 = __p1_819; \
-  int16x4_t __s2_819 = __p2_819; \
-  int16_t __ret_819; \
-  __ret_819 = vqaddh_s16(__s0_819, vqrdmulhh_s16(__s1_819, vget_lane_s16(__s2_819, __p3_819))); \
-  __ret_819; \
-})
-#else
-#define vqrdmlahh_lane_s16(__p0_820, __p1_820, __p2_820, __p3_820) __extension__ ({ \
-  int16_t __s0_820 = __p0_820; \
-  int16_t __s1_820 = __p1_820; \
-  int16x4_t __s2_820 = __p2_820; \
-  int16x4_t __rev2_820;  __rev2_820 = __builtin_shufflevector(__s2_820, __s2_820, 3, 2, 1, 0); \
-  int16_t __ret_820; \
-  __ret_820 = vqaddh_s16(__s0_820, vqrdmulhh_s16(__s1_820, __noswap_vget_lane_s16(__rev2_820, __p3_820))); \
-  __ret_820; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahs_laneq_s32(__p0_821, __p1_821, __p2_821, __p3_821) __extension__ ({ \
-  int32_t __s0_821 = __p0_821; \
-  int32_t __s1_821 = __p1_821; \
-  int32x4_t __s2_821 = __p2_821; \
-  int32_t __ret_821; \
-  __ret_821 = vqadds_s32(__s0_821, vqrdmulhs_s32(__s1_821, vgetq_lane_s32(__s2_821, __p3_821))); \
-  __ret_821; \
-})
-#else
-#define vqrdmlahs_laneq_s32(__p0_822, __p1_822, __p2_822, __p3_822) __extension__ ({ \
-  int32_t __s0_822 = __p0_822; \
-  int32_t __s1_822 = __p1_822; \
-  int32x4_t __s2_822 = __p2_822; \
-  int32x4_t __rev2_822;  __rev2_822 = __builtin_shufflevector(__s2_822, __s2_822, 3, 2, 1, 0); \
-  int32_t __ret_822; \
-  __ret_822 = vqadds_s32(__s0_822, vqrdmulhs_s32(__s1_822, __noswap_vgetq_lane_s32(__rev2_822, __p3_822))); \
-  __ret_822; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlahh_laneq_s16(__p0_823, __p1_823, __p2_823, __p3_823) __extension__ ({ \
-  int16_t __s0_823 = __p0_823; \
-  int16_t __s1_823 = __p1_823; \
-  int16x8_t __s2_823 = __p2_823; \
-  int16_t __ret_823; \
-  __ret_823 = vqaddh_s16(__s0_823, vqrdmulhh_s16(__s1_823, vgetq_lane_s16(__s2_823, __p3_823))); \
-  __ret_823; \
-})
-#else
-#define vqrdmlahh_laneq_s16(__p0_824, __p1_824, __p2_824, __p3_824) __extension__ ({ \
-  int16_t __s0_824 = __p0_824; \
-  int16_t __s1_824 = __p1_824; \
-  int16x8_t __s2_824 = __p2_824; \
-  int16x8_t __rev2_824;  __rev2_824 = __builtin_shufflevector(__s2_824, __s2_824, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_824; \
-  __ret_824 = vqaddh_s16(__s0_824, vqrdmulhh_s16(__s1_824, __noswap_vgetq_lane_s16(__rev2_824, __p3_824))); \
-  __ret_824; \
-})
-#endif
-
-__ai int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) {
-  int32_t __ret;
-  __ret = vqsubs_s32(__p0, vqrdmulhs_s32(__p1, __p2));
-  return __ret;
-}
-__ai int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) {
-  int16_t __ret;
-  __ret = vqsubh_s16(__p0, vqrdmulhh_s16(__p1, __p2));
-  return __ret;
-}
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_lane_s32(__p0_825, __p1_825, __p2_825, __p3_825) __extension__ ({ \
-  int32_t __s0_825 = __p0_825; \
-  int32_t __s1_825 = __p1_825; \
-  int32x2_t __s2_825 = __p2_825; \
-  int32_t __ret_825; \
-  __ret_825 = vqsubs_s32(__s0_825, vqrdmulhs_s32(__s1_825, vget_lane_s32(__s2_825, __p3_825))); \
-  __ret_825; \
-})
-#else
-#define vqrdmlshs_lane_s32(__p0_826, __p1_826, __p2_826, __p3_826) __extension__ ({ \
-  int32_t __s0_826 = __p0_826; \
-  int32_t __s1_826 = __p1_826; \
-  int32x2_t __s2_826 = __p2_826; \
-  int32x2_t __rev2_826;  __rev2_826 = __builtin_shufflevector(__s2_826, __s2_826, 1, 0); \
-  int32_t __ret_826; \
-  __ret_826 = vqsubs_s32(__s0_826, vqrdmulhs_s32(__s1_826, __noswap_vget_lane_s32(__rev2_826, __p3_826))); \
-  __ret_826; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_lane_s16(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \
-  int16_t __s0_827 = __p0_827; \
-  int16_t __s1_827 = __p1_827; \
-  int16x4_t __s2_827 = __p2_827; \
-  int16_t __ret_827; \
-  __ret_827 = vqsubh_s16(__s0_827, vqrdmulhh_s16(__s1_827, vget_lane_s16(__s2_827, __p3_827))); \
-  __ret_827; \
-})
-#else
-#define vqrdmlshh_lane_s16(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \
-  int16_t __s0_828 = __p0_828; \
-  int16_t __s1_828 = __p1_828; \
-  int16x4_t __s2_828 = __p2_828; \
-  int16x4_t __rev2_828;  __rev2_828 = __builtin_shufflevector(__s2_828, __s2_828, 3, 2, 1, 0); \
-  int16_t __ret_828; \
-  __ret_828 = vqsubh_s16(__s0_828, vqrdmulhh_s16(__s1_828, __noswap_vget_lane_s16(__rev2_828, __p3_828))); \
-  __ret_828; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshs_laneq_s32(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \
-  int32_t __s0_829 = __p0_829; \
-  int32_t __s1_829 = __p1_829; \
-  int32x4_t __s2_829 = __p2_829; \
-  int32_t __ret_829; \
-  __ret_829 = vqsubs_s32(__s0_829, vqrdmulhs_s32(__s1_829, vgetq_lane_s32(__s2_829, __p3_829))); \
-  __ret_829; \
-})
-#else
-#define vqrdmlshs_laneq_s32(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \
-  int32_t __s0_830 = __p0_830; \
-  int32_t __s1_830 = __p1_830; \
-  int32x4_t __s2_830 = __p2_830; \
-  int32x4_t __rev2_830;  __rev2_830 = __builtin_shufflevector(__s2_830, __s2_830, 3, 2, 1, 0); \
-  int32_t __ret_830; \
-  __ret_830 = vqsubs_s32(__s0_830, vqrdmulhs_s32(__s1_830, __noswap_vgetq_lane_s32(__rev2_830, __p3_830))); \
-  __ret_830; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vqrdmlshh_laneq_s16(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \
-  int16_t __s0_831 = __p0_831; \
-  int16_t __s1_831 = __p1_831; \
-  int16x8_t __s2_831 = __p2_831; \
-  int16_t __ret_831; \
-  __ret_831 = vqsubh_s16(__s0_831, vqrdmulhh_s16(__s1_831, vgetq_lane_s16(__s2_831, __p3_831))); \
-  __ret_831; \
-})
-#else
-#define vqrdmlshh_laneq_s16(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \
-  int16_t __s0_832 = __p0_832; \
-  int16_t __s1_832 = __p1_832; \
-  int16x8_t __s2_832 = __p2_832; \
-  int16x8_t __rev2_832;  __rev2_832 = __builtin_shufflevector(__s2_832, __s2_832, 7, 6, 5, 4, 3, 2, 1, 0); \
-  int16_t __ret_832; \
-  __ret_832 = vqsubh_s16(__s0_832, vqrdmulhh_s16(__s1_832, __noswap_vgetq_lane_s16(__rev2_832, __p3_832))); \
-  __ret_832; \
-})
-#endif
-
-#endif
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1));
-  return __ret;
-}
-#else
-__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vabdl_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1));
-  return __ret;
-}
-#else
-__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vabdl_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1));
-  return __ret;
-}
-#else
-__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vabdl_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1));
-  return __ret;
-}
-#else
-__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vabdl_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1));
-  return __ret;
-}
-#else
-__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vabdl_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1));
-  return __ret;
-}
-#else
-__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vabdl_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) {
-  uint8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmovl_high_u8(__rev0) + __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmovl_high_u32(__rev0) + __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmovl_high_u16(__rev0) + __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) {
-  int8x16_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmovl_high_s8(__rev0) + __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmovl_high_s32(__rev0) + __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmovl_high_s16(__rev0) + __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __ret;
-  __ret = __p0 + vmovl_high_u8(__p1);
-  return __ret;
-}
-#else
-__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_u8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __ret;
-  __ret = __p0 + vmovl_high_u32(__p1);
-  return __ret;
-}
-#else
-__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_u32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __ret;
-  __ret = __p0 + vmovl_high_u16(__p1);
-  return __ret;
-}
-#else
-__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_u16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __ret;
-  __ret = __p0 + vmovl_high_s8(__p1);
-  return __ret;
-}
-#else
-__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_s8(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __ret;
-  __ret = __p0 + vmovl_high_s32(__p1);
-  return __ret;
-}
-#else
-__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_s32(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __ret;
-  __ret = __p0 + vmovl_high_s16(__p1);
-  return __ret;
-}
-#else
-__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vmovl_high_s16(__rev1);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_p64(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \
-  poly64x2_t __s0_833 = __p0_833; \
-  poly64x1_t __s2_833 = __p2_833; \
-  poly64x2_t __ret_833; \
-  __ret_833 = vsetq_lane_p64(vget_lane_p64(__s2_833, __p3_833), __s0_833, __p1_833); \
-  __ret_833; \
-})
-#else
-#define vcopyq_lane_p64(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \
-  poly64x2_t __s0_834 = __p0_834; \
-  poly64x1_t __s2_834 = __p2_834; \
-  poly64x2_t __rev0_834;  __rev0_834 = __builtin_shufflevector(__s0_834, __s0_834, 1, 0); \
-  poly64x2_t __ret_834; \
-  __ret_834 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_834, __p3_834), __rev0_834, __p1_834); \
-  __ret_834 = __builtin_shufflevector(__ret_834, __ret_834, 1, 0); \
-  __ret_834; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_lane_f64(__p0_835, __p1_835, __p2_835, __p3_835) __extension__ ({ \
-  float64x2_t __s0_835 = __p0_835; \
-  float64x1_t __s2_835 = __p2_835; \
-  float64x2_t __ret_835; \
-  __ret_835 = vsetq_lane_f64(vget_lane_f64(__s2_835, __p3_835), __s0_835, __p1_835); \
-  __ret_835; \
-})
-#else
-#define vcopyq_lane_f64(__p0_836, __p1_836, __p2_836, __p3_836) __extension__ ({ \
-  float64x2_t __s0_836 = __p0_836; \
-  float64x1_t __s2_836 = __p2_836; \
-  float64x2_t __rev0_836;  __rev0_836 = __builtin_shufflevector(__s0_836, __s0_836, 1, 0); \
-  float64x2_t __ret_836; \
-  __ret_836 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_836, __p3_836), __rev0_836, __p1_836); \
-  __ret_836 = __builtin_shufflevector(__ret_836, __ret_836, 1, 0); \
-  __ret_836; \
-})
-#endif
-
-#define vcopy_lane_p64(__p0_837, __p1_837, __p2_837, __p3_837) __extension__ ({ \
-  poly64x1_t __s0_837 = __p0_837; \
-  poly64x1_t __s2_837 = __p2_837; \
-  poly64x1_t __ret_837; \
-  __ret_837 = vset_lane_p64(vget_lane_p64(__s2_837, __p3_837), __s0_837, __p1_837); \
-  __ret_837; \
-})
-#define vcopy_lane_f64(__p0_838, __p1_838, __p2_838, __p3_838) __extension__ ({ \
-  float64x1_t __s0_838 = __p0_838; \
-  float64x1_t __s2_838 = __p2_838; \
-  float64x1_t __ret_838; \
-  __ret_838 = vset_lane_f64(vget_lane_f64(__s2_838, __p3_838), __s0_838, __p1_838); \
-  __ret_838; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_p64(__p0_839, __p1_839, __p2_839, __p3_839) __extension__ ({ \
-  poly64x2_t __s0_839 = __p0_839; \
-  poly64x2_t __s2_839 = __p2_839; \
-  poly64x2_t __ret_839; \
-  __ret_839 = vsetq_lane_p64(vgetq_lane_p64(__s2_839, __p3_839), __s0_839, __p1_839); \
-  __ret_839; \
-})
-#else
-#define vcopyq_laneq_p64(__p0_840, __p1_840, __p2_840, __p3_840) __extension__ ({ \
-  poly64x2_t __s0_840 = __p0_840; \
-  poly64x2_t __s2_840 = __p2_840; \
-  poly64x2_t __rev0_840;  __rev0_840 = __builtin_shufflevector(__s0_840, __s0_840, 1, 0); \
-  poly64x2_t __rev2_840;  __rev2_840 = __builtin_shufflevector(__s2_840, __s2_840, 1, 0); \
-  poly64x2_t __ret_840; \
-  __ret_840 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_840, __p3_840), __rev0_840, __p1_840); \
-  __ret_840 = __builtin_shufflevector(__ret_840, __ret_840, 1, 0); \
-  __ret_840; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopyq_laneq_f64(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \
-  float64x2_t __s0_841 = __p0_841; \
-  float64x2_t __s2_841 = __p2_841; \
-  float64x2_t __ret_841; \
-  __ret_841 = vsetq_lane_f64(vgetq_lane_f64(__s2_841, __p3_841), __s0_841, __p1_841); \
-  __ret_841; \
-})
-#else
-#define vcopyq_laneq_f64(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \
-  float64x2_t __s0_842 = __p0_842; \
-  float64x2_t __s2_842 = __p2_842; \
-  float64x2_t __rev0_842;  __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \
-  float64x2_t __rev2_842;  __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, 1, 0); \
-  float64x2_t __ret_842; \
-  __ret_842 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_842, __p3_842), __rev0_842, __p1_842); \
-  __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \
-  __ret_842; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_p64(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \
-  poly64x1_t __s0_843 = __p0_843; \
-  poly64x2_t __s2_843 = __p2_843; \
-  poly64x1_t __ret_843; \
-  __ret_843 = vset_lane_p64(vgetq_lane_p64(__s2_843, __p3_843), __s0_843, __p1_843); \
-  __ret_843; \
-})
-#else
-#define vcopy_laneq_p64(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \
-  poly64x1_t __s0_844 = __p0_844; \
-  poly64x2_t __s2_844 = __p2_844; \
-  poly64x2_t __rev2_844;  __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 1, 0); \
-  poly64x1_t __ret_844; \
-  __ret_844 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_844, __p3_844), __s0_844, __p1_844); \
-  __ret_844; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-#define vcopy_laneq_f64(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \
-  float64x1_t __s0_845 = __p0_845; \
-  float64x2_t __s2_845 = __p2_845; \
-  float64x1_t __ret_845; \
-  __ret_845 = vset_lane_f64(vgetq_lane_f64(__s2_845, __p3_845), __s0_845, __p1_845); \
-  __ret_845; \
-})
-#else
-#define vcopy_laneq_f64(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \
-  float64x1_t __s0_846 = __p0_846; \
-  float64x2_t __s2_846 = __p2_846; \
-  float64x2_t __rev2_846;  __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 1, 0); \
-  float64x1_t __ret_846; \
-  __ret_846 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_846, __p3_846), __s0_846, __p1_846); \
-  __ret_846; \
-})
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __ret;
-  __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmlal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __ret;
-  __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmlal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlal_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlal_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __ret;
-  __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vmlsl_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlsl_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlsl_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __ret;
-  __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vmlsl_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __ret;
-  __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vmlsl_n_u32(__rev0, __noswap_vget_high_u32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __ret;
-  __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vmlsl_n_u16(__rev0, __noswap_vget_high_u16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __ret;
-  __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __ret;
-  __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#define vmulx_lane_f64(__p0_847, __p1_847, __p2_847) __extension__ ({ \
-  float64x1_t __s0_847 = __p0_847; \
-  float64x1_t __s1_847 = __p1_847; \
-  float64x1_t __ret_847; \
-  float64_t __x_847 = vget_lane_f64(__s0_847, 0); \
-  float64_t __y_847 = vget_lane_f64(__s1_847, __p2_847); \
-  float64_t __z_847 = vmulxd_f64(__x_847, __y_847); \
-  __ret_847 = vset_lane_f64(__z_847, __s0_847, __p2_847); \
-  __ret_847; \
-})
-#ifdef __LITTLE_ENDIAN__
-#define vmulx_laneq_f64(__p0_848, __p1_848, __p2_848) __extension__ ({ \
-  float64x1_t __s0_848 = __p0_848; \
-  float64x2_t __s1_848 = __p1_848; \
-  float64x1_t __ret_848; \
-  float64_t __x_848 = vget_lane_f64(__s0_848, 0); \
-  float64_t __y_848 = vgetq_lane_f64(__s1_848, __p2_848); \
-  float64_t __z_848 = vmulxd_f64(__x_848, __y_848); \
-  __ret_848 = vset_lane_f64(__z_848, __s0_848, 0); \
-  __ret_848; \
-})
-#else
-#define vmulx_laneq_f64(__p0_849, __p1_849, __p2_849) __extension__ ({ \
-  float64x1_t __s0_849 = __p0_849; \
-  float64x2_t __s1_849 = __p1_849; \
-  float64x2_t __rev1_849;  __rev1_849 = __builtin_shufflevector(__s1_849, __s1_849, 1, 0); \
-  float64x1_t __ret_849; \
-  float64_t __x_849 = vget_lane_f64(__s0_849, 0); \
-  float64_t __y_849 = __noswap_vgetq_lane_f64(__rev1_849, __p2_849); \
-  float64_t __z_849 = vmulxd_f64(__x_849, __y_849); \
-  __ret_849 = vset_lane_f64(__z_849, __s0_849, 0); \
-  __ret_849; \
-})
-#endif
-
-#endif
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + vabdl_u8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdl_u8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) {
-  uint16x8_t __ret;
-  __ret = __p0 + __noswap_vabdl_u8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + vabdl_u32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  uint32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __rev0 + __noswap_vabdl_u32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) {
-  uint64x2_t __ret;
-  __ret = __p0 + __noswap_vabdl_u32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + vabdl_u16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdl_u16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) {
-  uint32x4_t __ret;
-  __ret = __p0 + __noswap_vabdl_u16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + vabdl_s8(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __rev0 + __noswap_vabdl_s8(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) {
-  int16x8_t __ret;
-  __ret = __p0 + __noswap_vabdl_s8(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + vabdl_s32(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
-  int32x2_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0);
-  int64x2_t __ret;
-  __ret = __rev0 + __noswap_vabdl_s32(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-__ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) {
-  int64x2_t __ret;
-  __ret = __p0 + __noswap_vabdl_s32(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + vabdl_s16(__p1, __p2);
-  return __ret;
-}
-#else
-__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int16x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __rev0 + __noswap_vabdl_s16(__rev1, __rev2);
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-__ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) {
-  int32x4_t __ret;
-  __ret = __p0 + __noswap_vabdl_s16(__p1, __p2);
-  return __ret;
-}
-#endif
-
-#if defined(__aarch64__)
-#ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __ret;
-  __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2));
-  return __ret;
-}
-#else
-__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) {
-  uint16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __ret;
-  __ret = __noswap_vabal_u8(__rev0, __noswap_vget_high_u8(__rev1), __noswap_vget_high_u8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __ret;
-  __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2));
-  return __ret;
-}
-#else
-__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) {
-  uint64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  uint32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  uint32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  uint64x2_t __ret;
-  __ret = __noswap_vabal_u32(__rev0, __noswap_vget_high_u32(__rev1), __noswap_vget_high_u32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __ret;
-  __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2));
-  return __ret;
-}
-#else
-__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) {
-  uint32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  uint16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  uint32x4_t __ret;
-  __ret = __noswap_vabal_u16(__rev0, __noswap_vget_high_u16(__rev1), __noswap_vget_high_u16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __ret;
-  __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2));
-  return __ret;
-}
-#else
-__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) {
-  int16x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int8x16_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __ret;
-  __ret = __noswap_vabal_s8(__rev0, __noswap_vget_high_s8(__rev1), __noswap_vget_high_s8(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __ret;
-  __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2));
-  return __ret;
-}
-#else
-__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) {
-  int64x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
-  int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
-  int32x4_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0);
-  int64x2_t __ret;
-  __ret = __noswap_vabal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 1, 0);
-  return __ret;
-}
-#endif
-
-#ifdef __LITTLE_ENDIAN__
-__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __ret;
-  __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2));
-  return __ret;
-}
-#else
-__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) {
-  int32x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
-  int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
-  int16x8_t __rev2;  __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0);
-  int32x4_t __ret;
-  __ret = __noswap_vabal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2));
-  __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0);
-  return __ret;
-}
-#endif
-
-#endif
-
-#undef __ai
-
-#endif /* if !defined(__ARM_NEON) */
-#endif /* ifndef __ARM_FP */
-#endif /* __ARM_NEON_H */
diff --git a/linux-x86/lib64/clang/11.0.5/include/ia32intrin.h b/linux-x86/lib64/clang/11.0.5/include/ia32intrin.h
deleted file mode 100644
index 79b7f06..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/ia32intrin.h
+++ /dev/null
@@ -1,432 +0,0 @@
-/* ===-------- ia32intrin.h ---------------------------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __X86INTRIN_H
-#error "Never use <ia32intrin.h> directly; include <x86intrin.h> instead."
-#endif
-
-#ifndef __IA32INTRIN_H
-#define __IA32INTRIN_H
-
-/** Find the first set bit starting from the lsb. Result is undefined if
- *  input is 0.
- *
- *  \headerfile <x86intrin.h>
- *
- *  This intrinsic corresponds to the <c> BSF </c> instruction or the
- *  <c> TZCNT </c> instruction.
- *
- *  \param __A
- *     A 32-bit integer operand.
- *  \returns A 32-bit integer containing the bit number.
- */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-__bsfd(int __A) {
-  return __builtin_ctz(__A);
-}
-
-/** Find the first set bit starting from the msb. Result is undefined if
- *  input is 0.
- *
- *  \headerfile <x86intrin.h>
- *
- *  This intrinsic corresponds to the <c> BSR </c> instruction or the
- *  <c> LZCNT </c> instruction and an <c> XOR </c>.
- *
- *  \param __A
- *     A 32-bit integer operand.
- *  \returns A 32-bit integer containing the bit number.
- */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-__bsrd(int __A) {
-  return 31 - __builtin_clz(__A);
-}
-
-/** Swaps the bytes in the input, converting little endian to big endian or
- *  vice versa.
- *
- *  \headerfile <x86intrin.h>
- *
- *  This intrinsic corresponds to the <c> BSWAP </c> instruction.
- *
- *  \param __A
- *     A 32-bit integer operand.
- *  \returns A 32-bit integer containing the swapped bytes.
- */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-__bswapd(int __A) {
-  return __builtin_bswap32(__A);
-}
-
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-_bswap(int __A) {
-  return __builtin_bswap32(__A);
-}
-
-#define _bit_scan_forward(A) __bsfd((A))
-#define _bit_scan_reverse(A) __bsrd((A))
-
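-/* Illustrative example (editor's addition, hypothetical helper): for
- * __A = 0x18 (binary 11000), __bsfd(0x18) == 3 and __bsrd(0x18) == 4.
- * Both are undefined for __A == 0, so real callers guard the input. */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-__example_lowest_set_bit(int __A) {
-  return __A ? __bsfd(__A) : -1; /* -1 as an out-of-band "no bit set" value */
-}
-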
-#ifdef __x86_64__
-/** Find the first set bit starting from the lsb. Result is undefined if
- *  input is 0.
- *
- *  \headerfile <x86intrin.h>
- *
- *  This intrinsic corresponds to the <c> BSF </c> instruction or the
- *  <c> TZCNT </c> instruction.
- *
- *  \param __A
- *     A 64-bit integer operand.
- *  \returns A 32-bit integer containing the bit number.
- */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-__bsfq(long long __A) {
-  return __builtin_ctzll(__A);
-}
-
-/** Find the first set bit starting from the msb. Result is undefined if
- *  input is 0.
- *
- *  \headerfile <x86intrin.h>
- *
- *  This intrinsic corresponds to the <c> BSR </c> instruction or the
- *  <c> LZCNT </c> instruction and an <c> XOR </c>.
- *
- *  \param __A
- *     A 64-bit integer operand.
- *  \returns A 32-bit integer containing the bit number.
- */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-__bsrq(long long __A) {
-  return 63 - __builtin_clzll(__A);
-}
-
-/** Swaps the bytes in the input, converting little endian to big endian or
- *  vice versa.
- *
- *  \headerfile <x86intrin.h>
- *
- *  This intrinsic corresponds to the <c> BSWAP </c> instruction.
- *
- *  \param __A
- *     A 64-bit integer operand.
- *  \returns A 64-bit integer containing the swapped bytes.
- */
-static __inline__ long long __attribute__((__always_inline__, __nodebug__))
-__bswapq(long long __A) {
-  return __builtin_bswap64(__A);
-}
-
-#define _bswap64(A) __bswapq((A))
-#endif
-
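-/* Illustrative example (editor's addition): __bswapd(0x11223344) yields
- * 0x44332211; on x86_64, _bswap64(0x1122334455667788LL) yields
- * 0x8877665544332211. */
-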
-/** Counts the number of bits in the source operand having a value of 1.
- *
- *  \headerfile <x86intrin.h>
- *
- *  This intrinsic corresponds to the <c> POPCNT </c> instruction or a
- *  sequence of arithmetic and logic ops to calculate it.
- *
- *  \param __A
- *     An unsigned 32-bit integer operand.
- *  \returns A 32-bit integer containing the number of bits with value 1 in the
- *     source operand.
- */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
-__popcntd(unsigned int __A)
-{
-  return __builtin_popcount(__A);
-}
-
-#define _popcnt32(A) __popcntd((A))
-
-#ifdef __x86_64__
-/** Counts the number of bits in the source operand having a value of 1.
- *
- *  \headerfile <x86intrin.h>
- *
- *  This intrinsic corresponds to the <c> POPCNT </c> instruction or a
- *  sequence of arithmetic and logic ops to calculate it.
- *
- *  \param __A
- *     An unsigned 64-bit integer operand.
- *  \returns A 64-bit integer containing the number of bits with value 1 in the
- *     source operand.
- */
-static __inline__ long long __attribute__((__always_inline__, __nodebug__))
-__popcntq(unsigned long long __A)
-{
-  return __builtin_popcountll(__A);
-}
-
-#define _popcnt64(A) __popcntq((A))
-#endif /* __x86_64__ */
-
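-/* Illustrative example (editor's addition): __popcntd(0xF0F0u) == 8,
- * since eight bits are set; _popcnt32 simply forwards to __popcntd. */
-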
-#ifdef __x86_64__
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
-__readeflags(void)
-{
-  return __builtin_ia32_readeflags_u64();
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
-__writeeflags(unsigned long long __f)
-{
-  __builtin_ia32_writeeflags_u64(__f);
-}
-
-#else /* !__x86_64__ */
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
-__readeflags(void)
-{
-  return __builtin_ia32_readeflags_u32();
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
-__writeeflags(unsigned int __f)
-{
-  __builtin_ia32_writeeflags_u32(__f);
-}
-#endif /* !__x86_64__ */
-
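-/* Illustrative usage (editor's addition): a classic pattern is to save,
- * toggle, and restore a flag, e.g. probing for CPUID support on ia32 by
- * flipping the ID flag (bit 21):
- *
- *   unsigned int __f = __readeflags();
- *   __writeeflags(__f ^ 0x200000);
- *   int __has_cpuid = ((__readeflags() ^ __f) & 0x200000) != 0;
- *   __writeeflags(__f);
- */
-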
-/** Cast a 32-bit float value to a 32-bit unsigned integer value
- *
- *  \headerfile <x86intrin.h>
- *  This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction in x86_64,
- *  and corresponds to the <c> VMOVL / MOVL </c> instruction in ia32.
- *
- *  \param __A
- *     A 32-bit float value.
- *  \returns a 32-bit unsigned integer containing the converted value.
- */
-static __inline__ unsigned int __attribute__((__always_inline__))
-_castf32_u32(float __A) {
-  unsigned int D;
-  __builtin_memcpy(&D, &__A, sizeof(__A));
-  return D;
-}
-
-/** Cast a 64-bit float value to a 64-bit unsigned integer value
- *
- *  \headerfile <x86intrin.h>
- *  This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction in x86_64,
- *  and corresponds to the <c> VMOVL / MOVL </c> instruction in ia32.
- *
- *  \param __A
- *     A 64-bit float value.
- *  \returns a 64-bit unsigned integer containing the converted value.
- */
-static __inline__ unsigned long long __attribute__((__always_inline__))
-_castf64_u64(double __A) {
-  unsigned long long D;
-  __builtin_memcpy(&D, &__A, sizeof(__A));
-  return D;
-}
-
-/** Cast a 32-bit unsigned integer value to a 32-bit float value
- *
- *  \headerfile <x86intrin.h>
- *  This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction in x86_64,
- *  and corresponds to the <c> FLDS </c> instruction in ia32.
- *
- *  \param __A
- *     A 32-bit unsigned integer value.
- *  \returns a 32-bit float value containing the converted value.
- */
-static __inline__ float __attribute__((__always_inline__))
-_castu32_f32(unsigned int __A) {
-  float D;
-  __builtin_memcpy(&D, &__A, sizeof(__A));
-  return D;
-}
-
-/** Cast a 64-bit unsigned integer value to a 64-bit float value
- *
- *  \headerfile <x86intrin.h>
- *  This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction in x86_64,
- *  and corresponds to the <c> FLDL </c> instruction in ia32.
- *
- *  \param __A
- *     A 64-bit unsigned integer value.
- *  \returns a 64-bit float value containing the converted value.
- */
-static __inline__ double __attribute__((__always_inline__))
-_castu64_f64(unsigned long long __A) {
-  double D;
-  __builtin_memcpy(&D, &__A, sizeof(__A));
-  return D;
-}
-
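-/* Illustrative example (editor's addition): these casts reinterpret bit
- * patterns rather than converting values. _castf32_u32(1.0f) yields
- * 0x3F800000 (the IEEE-754 encoding of 1.0f), and the round trip
- * _castu32_f32(_castf32_u32(__x)) returns __x unchanged. */
-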
-/** Adds the unsigned integer operand to the CRC-32C checksum of the
- *     unsigned char operand.
- *
- *  \headerfile <x86intrin.h>
- *
- *  This intrinsic corresponds to the <c> CRC32B </c> instruction.
- *
- *  \param __C
- *     An unsigned integer operand to add to the CRC-32C checksum of operand
- *     \a  __D.
- *  \param __D
- *     An unsigned 8-bit integer operand used to compute the CRC-32C checksum.
- *  \returns The result of adding operand \a __C to the CRC-32C checksum of
- *     operand \a __D.
- */
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
-__crc32b(unsigned int __C, unsigned char __D)
-{
-  return __builtin_ia32_crc32qi(__C, __D);
-}
-
-/** Adds the unsigned integer operand to the CRC-32C checksum of the
- *     unsigned short operand.
- *
- *  \headerfile <x86intrin.h>
- *
- *  This intrinsic corresponds to the <c> CRC32W </c> instruction.
- *
- *  \param __C
- *     An unsigned integer operand to add to the CRC-32C checksum of operand
- *     \a  __D.
- *  \param __D
- *     An unsigned 16-bit integer operand used to compute the CRC-32C checksum.
- *  \returns The result of adding operand \a __C to the CRC-32C checksum of
- *     operand \a __D.
- */
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
-__crc32w(unsigned int __C, unsigned short __D)
-{
-  return __builtin_ia32_crc32hi(__C, __D);
-}
-
-/** Adds the unsigned integer operand to the CRC-32C checksum of the
- *     second unsigned integer operand.
- *
- *  \headerfile <x86intrin.h>
- *
- *  This intrinsic corresponds to the <c> CRC32D </c> instruction.
- *
- *  \param __C
- *     An unsigned integer operand to add to the CRC-32C checksum of operand
- *     \a  __D.
- *  \param __D
- *     An unsigned 32-bit integer operand used to compute the CRC-32C checksum.
- *  \returns The result of adding operand \a __C to the CRC-32C checksum of
- *     operand \a __D.
- */
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
-__crc32d(unsigned int __C, unsigned int __D)
-{
-  return __builtin_ia32_crc32si(__C, __D);
-}
-
-#ifdef __x86_64__
-/** Adds the unsigned integer operand to the CRC-32C checksum of the
- *     unsigned 64-bit integer operand.
- *
- *  \headerfile <x86intrin.h>
- *
- *  This intrinsic corresponds to the <c> CRC32Q </c> instruction.
- *
- *  \param __C
- *     An unsigned integer operand to add to the CRC-32C checksum of operand
- *     \a  __D.
- *  \param __D
- *     An unsigned 64-bit integer operand used to compute the CRC-32C checksum.
- *  \returns The result of adding operand \a __C to the CRC-32C checksum of
- *     operand \a __D.
- */
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
-__crc32q(unsigned long long __C, unsigned long long __D)
-{
-  return __builtin_ia32_crc32di(__C, __D);
-}
-#endif /* __x86_64__ */
-
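-/* Illustrative sketch (editor's addition, hypothetical helper): the
- * byte-granularity intrinsic extends naturally to whole buffers by
- * feeding each byte into the running CRC-32C checksum. */
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
-__example_crc32c_buf(unsigned int __crc, const unsigned char *__p, unsigned int __n)
-{
-  while (__n--)
-    __crc = __crc32b(__crc, *__p++);
-  return __crc;
-}
-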
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
-__rdpmc(int __A) {
-  return __builtin_ia32_rdpmc(__A);
-}
-
-/* __rdtscp */
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
-__rdtscp(unsigned int *__A) {
-  return __builtin_ia32_rdtscp(__A);
-}
-
-#define _rdtsc() __rdtsc()
-
-#define _rdpmc(A) __rdpmc(A)
-
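-/* Illustrative usage (editor's addition): __rdtscp returns the time-stamp
- * counter and stores the IA32_TSC_AUX value (typically encoding the core
- * ID) through its pointer argument:
- *
- *   unsigned int __aux;
- *   unsigned long long __start = __rdtscp(&__aux);
- */
-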
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
-_wbinvd(void) {
-  __builtin_ia32_wbinvd();
-}
-
-static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
-__rolb(unsigned char __X, int __C) {
-  return __builtin_rotateleft8(__X, __C);
-}
-
-static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
-__rorb(unsigned char __X, int __C) {
-  return __builtin_rotateright8(__X, __C);
-}
-
-static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
-__rolw(unsigned short __X, int __C) {
-  return __builtin_rotateleft16(__X, __C);
-}
-
-static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
-__rorw(unsigned short __X, int __C) {
-  return __builtin_rotateright16(__X, __C);
-}
-
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
-__rold(unsigned int __X, int __C) {
-  return __builtin_rotateleft32(__X, __C);
-}
-
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
-__rord(unsigned int __X, int __C) {
-  return __builtin_rotateright32(__X, __C);
-}
-
-#ifdef __x86_64__
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
-__rolq(unsigned long long __X, int __C) {
-  return __builtin_rotateleft64(__X, __C);
-}
-
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
-__rorq(unsigned long long __X, int __C) {
-  return __builtin_rotateright64(__X, __C);
-}
-#endif /* __x86_64__ */
-
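-/* Illustrative example (editor's addition): rotates wrap the shifted-out
- * bits around to the other end, so __rold(0x80000001U, 1) == 0x00000003U
- * and __rorb(0x01, 1) == 0x80. */
-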
-#ifndef _MSC_VER
-/* These are already provided as builtins for MSVC. */
-/* Select the correct function based on the size of long. */
-#ifdef __LP64__
-#define _lrotl(a,b) __rolq((a), (b))
-#define _lrotr(a,b) __rorq((a), (b))
-#else
-#define _lrotl(a,b) __rold((a), (b))
-#define _lrotr(a,b) __rord((a), (b))
-#endif
-#define _rotl(a,b) __rold((a), (b))
-#define _rotr(a,b) __rord((a), (b))
-#endif // _MSC_VER
-
-/* These are not builtins, so they need to be provided in all modes. */
-#define _rotwl(a,b) __rolw((a), (b))
-#define _rotwr(a,b) __rorw((a), (b))
-
-#endif /* __IA32INTRIN_H */
diff --git a/linux-x86/lib64/clang/11.0.5/include/immintrin.h b/linux-x86/lib64/clang/11.0.5/include/immintrin.h
deleted file mode 100644
index e9dff23..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/immintrin.h
+++ /dev/null
@@ -1,588 +0,0 @@
-/*===---- immintrin.h - Intel intrinsics -----------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __IMMINTRIN_H
-#define __IMMINTRIN_H
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__MMX__)
-#include <mmintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SSE__)
-#include <xmmintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SSE2__)
-#include <emmintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SSE3__)
-#include <pmmintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SSSE3__)
-#include <tmmintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__SSE4_2__) || defined(__SSE4_1__))
-#include <smmintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AES__) || defined(__PCLMUL__))
-#include <wmmintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__CLFLUSHOPT__)
-#include <clflushoptintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__CLWB__)
-#include <clwbintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX__)
-#include <avxintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX2__)
-#include <avx2intrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__F16C__)
-#include <f16cintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__VPCLMULQDQ__)
-#include <vpclmulqdqintrin.h>
-#endif
-
-/* No feature check desired due to internal checks */
-#include <bmiintrin.h>
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__BMI2__)
-#include <bmi2intrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__LZCNT__)
-#include <lzcntintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__POPCNT__)
-#include <popcntintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__FMA__)
-#include <fmaintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512F__)
-#include <avx512fintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512VL__)
-#include <avx512vlintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512BW__)
-#include <avx512bwintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512BITALG__)
-#include <avx512bitalgintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512CD__)
-#include <avx512cdintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512VPOPCNTDQ__)
-#include <avx512vpopcntdqintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__))
-#include <avx512vpopcntdqvlintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512VNNI__)
-#include <avx512vnniintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512VNNI__))
-#include <avx512vlvnniintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512DQ__)
-#include <avx512dqintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512BITALG__))
-#include <avx512vlbitalgintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512BW__))
-#include <avx512vlbwintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512CD__))
-#include <avx512vlcdintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512DQ__))
-#include <avx512vldqintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512ER__)
-#include <avx512erintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512IFMA__)
-#include <avx512ifmaintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512IFMA__) && defined(__AVX512VL__))
-#include <avx512ifmavlintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512VBMI__)
-#include <avx512vbmiintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VBMI__) && defined(__AVX512VL__))
-#include <avx512vbmivlintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512VBMI2__)
-#include <avx512vbmi2intrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VBMI2__) && defined(__AVX512VL__))
-#include <avx512vlvbmi2intrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512PF__)
-#include <avx512pfintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512BF16__)
-#include <avx512bf16intrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512BF16__))
-#include <avx512vlbf16intrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__PKU__)
-#include <pkuintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__VAES__)
-#include <vaesintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__GFNI__)
-#include <gfniintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__RDPID__)
-/// Returns the value of the IA32_TSC_AUX MSR (0xc0000103).
-///
-/// \headerfile <immintrin.h>
-///
-/// This intrinsic corresponds to the <c> RDPID </c> instruction.
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("rdpid")))
-_rdpid_u32(void) {
-  return __builtin_ia32_rdpid();
-}
-#endif // __RDPID__
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__RDRND__)
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
-_rdrand16_step(unsigned short *__p)
-{
-  return __builtin_ia32_rdrand16_step(__p);
-}
-
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
-_rdrand32_step(unsigned int *__p)
-{
-  return __builtin_ia32_rdrand32_step(__p);
-}
-
-#ifdef __x86_64__
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
-_rdrand64_step(unsigned long long *__p)
-{
-  return __builtin_ia32_rdrand64_step(__p);
-}
-#endif
-#endif /* __RDRND__ */
-
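-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__RDRND__)
-/* Illustrative sketch (editor's addition, hypothetical helper): the
- * _rdrand*_step intrinsics return 1 on success and 0 when no random data
- * was available, so callers typically retry a bounded number of times. */
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
-__example_rdrand32_retry(unsigned int *__p)
-{
-  int __i;
-  for (__i = 0; __i < 10; ++__i)
-    if (_rdrand32_step(__p))
-      return 1;
-  return 0; /* hardware entropy temporarily unavailable */
-}
-#endif
-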
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__FSGSBASE__)
-#ifdef __x86_64__
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readfsbase_u32(void)
-{
-  return __builtin_ia32_rdfsbase32();
-}
-
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readfsbase_u64(void)
-{
-  return __builtin_ia32_rdfsbase64();
-}
-
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readgsbase_u32(void)
-{
-  return __builtin_ia32_rdgsbase32();
-}
-
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_readgsbase_u64(void)
-{
-  return __builtin_ia32_rdgsbase64();
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writefsbase_u32(unsigned int __V)
-{
-  __builtin_ia32_wrfsbase32(__V);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writefsbase_u64(unsigned long long __V)
-{
-  __builtin_ia32_wrfsbase64(__V);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writegsbase_u32(unsigned int __V)
-{
-  __builtin_ia32_wrgsbase32(__V);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("fsgsbase")))
-_writegsbase_u64(unsigned long long __V)
-{
-  __builtin_ia32_wrgsbase64(__V);
-}
-
-#endif
-#endif /* __FSGSBASE__ */
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__MOVBE__)
-
-/* The structs used below are to force the load/store to be unaligned. This
- * is accomplished with the __packed__ attribute. The __may_alias__ prevents
- * TBAA metadata from being generated based on the struct and the type of the
- * field inside of it.
- */
-
-static __inline__ short __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_loadbe_i16(void const * __P) {
-  struct __loadu_i16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap16(((const struct __loadu_i16*)__P)->__v);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_storebe_i16(void * __P, short __D) {
-  struct __storeu_i16 {
-    short __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i16*)__P)->__v = __builtin_bswap16(__D);
-}
-
-static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_loadbe_i32(void const * __P) {
-  struct __loadu_i32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap32(((const struct __loadu_i32*)__P)->__v);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_storebe_i32(void * __P, int __D) {
-  struct __storeu_i32 {
-    int __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i32*)__P)->__v = __builtin_bswap32(__D);
-}
-
-#ifdef __x86_64__
-static __inline__ long long __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_loadbe_i64(void const * __P) {
-  struct __loadu_i64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  return __builtin_bswap64(((const struct __loadu_i64*)__P)->__v);
-}
-
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
-_storebe_i64(void * __P, long long __D) {
-  struct __storeu_i64 {
-    long long __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __storeu_i64*)__P)->__v = __builtin_bswap64(__D);
-}
-#endif
-#endif /* __MOVBE__ */
-
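-/* Illustrative usage (editor's addition): _loadbe_i32 reads a possibly
- * unaligned big-endian 32-bit value and returns it in host byte order,
- * e.g. when parsing a network header:
- *
- *   const unsigned char __pkt[4] = {0x00, 0x00, 0x00, 0x2A};
- *   int __value = _loadbe_i32(__pkt);   (yields 42)
- */
-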
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__RTM__)
-#include <rtmintrin.h>
-#include <xtestintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SHA__)
-#include <shaintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__FXSR__)
-#include <fxsrintrin.h>
-#endif
-
-/* No feature check desired due to internal MSC_VER checks */
-#include <xsaveintrin.h>
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__XSAVEOPT__)
-#include <xsaveoptintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__XSAVEC__)
-#include <xsavecintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__XSAVES__)
-#include <xsavesintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SHSTK__)
-#include <cetintrin.h>
-#endif
-
-/* Some intrinsics inside adxintrin.h are available only on processors with ADX,
- * whereas others are available at all times. */
-#include <adxintrin.h>
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__RDSEED__)
-#include <rdseedintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__WBNOINVD__)
-#include <wbnoinvdintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__CLDEMOTE__)
-#include <cldemoteintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__WAITPKG__)
-#include <waitpkgintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__MOVDIRI__) || defined(__MOVDIR64B__)
-#include <movdirintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__PCONFIG__)
-#include <pconfigintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SGX__)
-#include <sgxintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__PTWRITE__)
-#include <ptwriteintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__INVPCID__)
-#include <invpcidintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AMXTILE__) || defined(__AMXINT8__) || defined(__AMXBF16__)
-#include <amxintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__AVX512VP2INTERSECT__)
-#include <avx512vp2intersectintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__))
-#include <avx512vlvp2intersectintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__ENQCMD__)
-#include <enqcmdintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__SERIALIZE__)
-#include <serializeintrin.h>
-#endif
-
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
-    defined(__TSXLDTRK__)
-#include <tsxldtrkintrin.h>
-#endif
-
-#if defined(_MSC_VER) && __has_extension(gnu_asm)
-/* Define the default attributes for these intrinsics */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
-#ifdef __cplusplus
-extern "C" {
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange HLE
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchange_HLEAcquire(long volatile *_Target, long _Value) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; xchg %0, %1"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchange_HLERelease(long volatile *_Target, long _Value) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; xchg %0, %1"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-#endif
-#if defined(__x86_64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64_HLEAcquire(__int64 volatile *_Target, __int64 _Value) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; xchg %0, %1"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64_HLERelease(__int64 volatile *_Target, __int64 _Value) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; xchg %0, %1"
-                       : "+r" (_Value), "+m" (*_Target) :: "memory");
-  return _Value;
-}
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Compare Exchange HLE
-\*----------------------------------------------------------------------------*/
-#if defined(__i386__) || defined(__x86_64__)
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_HLEAcquire(long volatile *_Destination,
-                              long _Exchange, long _Comparand) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg %2, %1"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange_HLERelease(long volatile *_Destination,
-                              long _Exchange, long _Comparand) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg %2, %1"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-#endif
-#if defined(__x86_64__)
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand) {
-  __asm__ __volatile__(".byte 0xf2 ; lock ; cmpxchg %2, %1"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64_HLERelease(__int64 volatile *_Destination,
-                              __int64 _Exchange, __int64 _Comparand) {
-  __asm__ __volatile__(".byte 0xf3 ; lock ; cmpxchg %2, %1"
-                       : "+a" (_Comparand), "+m" (*_Destination)
-                       : "r" (_Exchange) : "memory");
-  return _Comparand;
-}
-#endif
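-
-#if defined(__i386__) || defined(__x86_64__)
-/* Illustrative sketch (editor's addition, hypothetical names): the HLE
- * exchange pair above is the classic building block for an elided spin
- * lock, with XACQUIRE on the locking exchange and XRELEASE on unlock. */
-static __inline__ void __DEFAULT_FN_ATTRS
-__example_hle_lock(long volatile *__lock) {
-  while (_InterlockedExchange_HLEAcquire(__lock, 1) != 0)
-    ; /* spin while the previous value was 1 (already locked) */
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__example_hle_unlock(long volatile *__lock) {
-  _InterlockedExchange_HLERelease(__lock, 0);
-}
-#endif
-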
-#ifdef __cplusplus
-}
-#endif
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* defined(_MSC_VER) && __has_extension(gnu_asm) */
-
-#endif /* __IMMINTRIN_H */
diff --git a/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/cmath b/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/cmath
deleted file mode 100644
index bd6011e..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/cmath
+++ /dev/null
@@ -1,75 +0,0 @@
-/*===-- cmath - OpenMP cmath wrapper for target regions --------------- c++ -===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __CLANG_OPENMP_CMATH_H__
-#define __CLANG_OPENMP_CMATH_H__
-
-#ifndef _OPENMP
-#error "This file is for OpenMP compilation only."
-#endif
-
-#include_next <cmath>
-
-// Make sure we include our math.h overlay; it probably happened already, but
-// we need to be sure.
-#include <math.h>
-
-// We (might) need cstdlib because __clang_cuda_cmath.h below declares `abs`
-// which might live in cstdlib.
-#include <cstdlib>
-
-#pragma omp begin declare variant match(                                       \
-    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
-
-#define __CUDA__
-#define __OPENMP_NVPTX__
-#include <__clang_cuda_cmath.h>
-#undef __OPENMP_NVPTX__
-#undef __CUDA__
-
-// Overloads not provided by the CUDA wrappers but by the CUDA system headers.
-// Since we do not include the latter we define them ourselves.
-#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
-
-__DEVICE__ float acosh(float __x) { return ::acoshf(__x); }
-__DEVICE__ float asinh(float __x) { return ::asinhf(__x); }
-__DEVICE__ float atanh(float __x) { return ::atanhf(__x); }
-__DEVICE__ float cbrt(float __x) { return ::cbrtf(__x); }
-__DEVICE__ float erf(float __x) { return ::erff(__x); }
-__DEVICE__ float erfc(float __x) { return ::erfcf(__x); }
-__DEVICE__ float exp2(float __x) { return ::exp2f(__x); }
-__DEVICE__ float expm1(float __x) { return ::expm1f(__x); }
-__DEVICE__ float fdim(float __x, float __y) { return ::fdimf(__x, __y); }
-__DEVICE__ float hypot(float __x, float __y) { return ::hypotf(__x, __y); }
-__DEVICE__ int ilogb(float __x) { return ::ilogbf(__x); }
-__DEVICE__ float lgamma(float __x) { return ::lgammaf(__x); }
-__DEVICE__ long long int llrint(float __x) { return ::llrintf(__x); }
-__DEVICE__ long long int llround(float __x) { return ::llroundf(__x); }
-__DEVICE__ float log1p(float __x) { return ::log1pf(__x); }
-__DEVICE__ float log2(float __x) { return ::log2f(__x); }
-__DEVICE__ float logb(float __x) { return ::logbf(__x); }
-__DEVICE__ long int lrint(float __x) { return ::lrintf(__x); }
-__DEVICE__ long int lround(float __x) { return ::lroundf(__x); }
-__DEVICE__ float nextafter(float __x, float __y) {
-  return ::nextafterf(__x, __y);
-}
-__DEVICE__ float remainder(float __x, float __y) {
-  return ::remainderf(__x, __y);
-}
-__DEVICE__ float scalbln(float __x, long int __y) {
-  return ::scalblnf(__x, __y);
-}
-__DEVICE__ float scalbn(float __x, int __y) { return ::scalbnf(__x, __y); }
-__DEVICE__ float tgamma(float __x) { return ::tgammaf(__x); }
-
-#undef __DEVICE__
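-/* Editor's note (illustrative): these overloads let float calls such as
- * hypot(3.0f, 4.0f) resolve on the device via ::hypotf, yielding 5.0f,
- * without the CUDA system headers that normally provide them. */
-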
-
-#pragma omp end declare variant
-
-#endif
diff --git a/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex b/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex
deleted file mode 100644
index 1ed0b14..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex
+++ /dev/null
@@ -1,25 +0,0 @@
-/*===-- complex --- OpenMP complex wrapper for target regions --------- c++ -===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __CLANG_OPENMP_COMPLEX__
-#define __CLANG_OPENMP_COMPLEX__
-
-#ifndef _OPENMP
-#error "This file is for OpenMP compilation only."
-#endif
-
-// We require std math functions in the complex builtins below.
-#include <cmath>
-
-#define __CUDA__
-#include <__clang_cuda_complex_builtins.h>
-#endif
-
-// Grab the host header too.
-#include_next <complex>
diff --git a/linux-x86/lib64/clang/11.0.5/include/popcntintrin.h b/linux-x86/lib64/clang/11.0.5/include/popcntintrin.h
deleted file mode 100644
index 3129010..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/popcntintrin.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*===---- popcntintrin.h - POPCNT intrinsics -------------------------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __POPCNTINTRIN_H
-#define __POPCNTINTRIN_H
-
-/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt")))
-
-/// Counts the number of bits in the source operand having a value of 1.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> POPCNT </c> instruction.
-///
-/// \param __A
-///    An unsigned 32-bit integer operand.
-/// \returns A 32-bit integer containing the number of bits with value 1 in the
-///    source operand.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_popcnt_u32(unsigned int __A)
-{
-  return __builtin_popcount(__A);
-}
-
-#ifdef __x86_64__
-/// Counts the number of bits in the source operand having a value of 1.
-///
-/// \headerfile <x86intrin.h>
-///
-/// This intrinsic corresponds to the <c> POPCNT </c> instruction.
-///
-/// \param __A
-///    An unsigned 64-bit integer operand.
-/// \returns A 64-bit integer containing the number of bits with value 1 in the
-///    source operand.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_popcnt_u64(unsigned long long __A)
-{
-  return __builtin_popcountll(__A);
-}
-#endif /* __x86_64__ */
-
-#undef __DEFAULT_FN_ATTRS
-
-#endif /* __POPCNTINTRIN_H */
diff --git a/linux-x86/lib64/clang/11.0.5/include/profile/InstrProfData.inc b/linux-x86/lib64/clang/11.0.5/include/profile/InstrProfData.inc
deleted file mode 100644
index a691352..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/profile/InstrProfData.inc
+++ /dev/null
@@ -1,785 +0,0 @@
-/*===-- InstrProfData.inc - instr profiling runtime structures -*- C++ -*-=== *\
-|*
-|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-|* See https://llvm.org/LICENSE.txt for license information.
-|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-|*
-\*===----------------------------------------------------------------------===*/
-/*
- * This is the master file that defines all the data structures, signatures,
- * and constant literals that are shared across the profiling runtime library,
- * compiler (instrumentation), and host tools (reader/writer). The entities
- * defined in this file affect the profile runtime ABI, the raw profile format,
- * or both.
- *
- * The file has two identical copies. The master copy lives in LLVM and
- * the other one sits in the compiler-rt/lib/profile directory. To make changes
- * in this file, first modify the master copy and copy it over to compiler-rt.
- * Testing of any change in this file can start only after the two copies are
- * synced up.
- *
- * The first part of the file includes macros that define types, names, and
- * initializers for the member fields of the core data structures. The field
- * declarations for one structure are enabled by defining the field activation
- * macro associated with that structure. Only one field activation record
- * can be defined at a time, and the remaining definitions will be filtered out
- * by the preprocessor.
- *
- * Examples of how the template is used to instantiate structure definitions:
- * 1. To declare a structure:
- *
- * struct ProfData {
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- *    Type Name;
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };
- *
- * 2. To construct LLVM type arrays for the struct type:
- *
- * Type *DataTypes[] = {
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- *   LLVMType,
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };
- *
- * 3. To construct a constant array for the initializers:
- * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
- *   Initializer,
- * Constant *ConstantVals[] = {
- * #include "llvm/ProfileData/InstrProfData.inc"
- * };
- *
- *
- * The second part of the file includes definitions of all other entities that
- * are related to the runtime ABI and format. When no field activation macro is
- * defined, this file can be included to introduce the definitions.
- *
-\*===----------------------------------------------------------------------===*/
-
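-/* Illustrative expansion (editor's addition): activating one field macro
- * turns the entries below into plain struct fields; a raw-profile header
- * declaration, for example, looks roughly like:
- *
- *   typedef struct {
- *   #define INSTR_PROF_RAW_HEADER(Type, Name, Initializer) Type Name;
- *   #include "llvm/ProfileData/InstrProfData.inc"
- *   } __llvm_profile_header;
- */
-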
-/* Functions marked with INSTR_PROF_VISIBILITY must have hidden visibility in
- * the compiler runtime. */
-#ifndef INSTR_PROF_VISIBILITY
-#define INSTR_PROF_VISIBILITY
-#endif
-
-/* INSTR_PROF_DATA start. */
-/* Definition of member fields of the per-function control structure. */
-#ifndef INSTR_PROF_DATA
-#define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
-                ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
-                IndexedInstrProf::ComputeHash(getPGOFuncNameVarInitializer(Inc->getName()))))
-INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
-                ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
-                Inc->getHash()->getZExtValue()))
-INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt64PtrTy(Ctx), CounterPtr, \
-                ConstantExpr::getBitCast(CounterPtr, \
-                llvm::Type::getInt64PtrTy(Ctx)))
-/* This is used to map function pointers for the indirect call targets to
- * function name hashes during the conversion from raw to merged profile
- * data.
- */
-INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), FunctionPointer, \
-                FunctionAddr)
-INSTR_PROF_DATA(IntPtrT, llvm::Type::getInt8PtrTy(Ctx), Values, \
-                ValuesPtrExpr)
-INSTR_PROF_DATA(const uint32_t, llvm::Type::getInt32Ty(Ctx), NumCounters, \
-                ConstantInt::get(llvm::Type::getInt32Ty(Ctx), NumCounters))
-INSTR_PROF_DATA(const uint16_t, Int16ArrayTy, NumValueSites[IPVK_Last+1], \
-                ConstantArray::get(Int16ArrayTy, Int16ArrayVals))
-#undef INSTR_PROF_DATA
-/* INSTR_PROF_DATA end. */
-
-
-/* This is an internal data structure used by the value profiler. It
- * is defined here so that the serialization code in LLVM can be
- * shared and exercised in unit tests.
- *
- * typedef struct ValueProfNode {
- *   // InstrProfValueData VData;
- *   uint64_t Value;
- *   uint64_t Count;
- *   struct ValueProfNode *Next;
- * } ValueProfNode;
- */
-/* INSTR_PROF_VALUE_NODE start. */
-#ifndef INSTR_PROF_VALUE_NODE
-#define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Value, \
-                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
-INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Count, \
-                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
-INSTR_PROF_VALUE_NODE(PtrToNodeT, llvm::Type::getInt8PtrTy(Ctx), Next, \
-                      ConstantInt::get(llvm::Type::getInt8PtrTy(Ctx), 0))
-#undef INSTR_PROF_VALUE_NODE
-/* INSTR_PROF_VALUE_NODE end. */
-
-/* INSTR_PROF_RAW_HEADER  start */
-/* Definition of member fields of the raw profile header data structure. */
-#ifndef INSTR_PROF_RAW_HEADER
-#define INSTR_PROF_RAW_HEADER(Type, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
-INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
-INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
-INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesBeforeCounters, PaddingBytesBeforeCounters)
-INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
-INSTR_PROF_RAW_HEADER(uint64_t, PaddingBytesAfterCounters, PaddingBytesAfterCounters)
-INSTR_PROF_RAW_HEADER(uint64_t, NamesSize,  NamesSize)
-INSTR_PROF_RAW_HEADER(uint64_t, CountersDelta, (uintptr_t)CountersBegin)
-INSTR_PROF_RAW_HEADER(uint64_t, NamesDelta, (uintptr_t)NamesBegin)
-INSTR_PROF_RAW_HEADER(uint64_t, ValueKindLast, IPVK_Last)
-#undef INSTR_PROF_RAW_HEADER
-/* INSTR_PROF_RAW_HEADER  end */
-
-/* VALUE_PROF_FUNC_PARAM start */
-/* Definition of parameter types of the runtime API used to do value profiling
- * for a given value site.
- */
-#ifndef VALUE_PROF_FUNC_PARAM
-#define VALUE_PROF_FUNC_PARAM(ArgType, ArgName, ArgLLVMType)
-#define INSTR_PROF_COMMA
-#else
-#define INSTR_PROF_DATA_DEFINED
-#define INSTR_PROF_COMMA ,
-#endif
-VALUE_PROF_FUNC_PARAM(uint64_t, TargetValue, Type::getInt64Ty(Ctx)) \
-                      INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(void *, Data, Type::getInt8PtrTy(Ctx)) INSTR_PROF_COMMA
-#ifndef VALUE_RANGE_PROF
-VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
-#else /* VALUE_RANGE_PROF */
-VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx)) \
-                      INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeStart, Type::getInt64Ty(Ctx)) \
-                      INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeLast, Type::getInt64Ty(Ctx)) \
-                      INSTR_PROF_COMMA
-VALUE_PROF_FUNC_PARAM(uint64_t, LargeValue, Type::getInt64Ty(Ctx))
-#endif /*VALUE_RANGE_PROF */
-#undef VALUE_PROF_FUNC_PARAM
-#undef INSTR_PROF_COMMA
-/* VALUE_PROF_FUNC_PARAM end */
-
-/* VALUE_PROF_KIND start */
-#ifndef VALUE_PROF_KIND
-#define VALUE_PROF_KIND(Enumerator, Value, Descr)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-/* For indirect function call value profiling, the addresses of the target
- * functions are profiled by the instrumented code. The target addresses are
- * written in the raw profile data and converted to target function name's MD5
- * hash by the profile reader during deserialization.  Typically, this happens
- * when the raw profile data is read during profile merging.
- *
- * For this remapping, the ProfData is used. ProfData contains both the function
- * name hash and the function address.
- */
-VALUE_PROF_KIND(IPVK_IndirectCallTarget, 0, "indirect call target")
-/* For memory intrinsic functions size profiling. */
-VALUE_PROF_KIND(IPVK_MemOPSize, 1, "memory intrinsic functions size")
-/* These two kinds must be the last to be
- * declared. This is to make sure the string
- * array created with the template can be
- * indexed with the kind value.
- */
-VALUE_PROF_KIND(IPVK_First, IPVK_IndirectCallTarget, "first")
-VALUE_PROF_KIND(IPVK_Last, IPVK_MemOPSize, "last")
-
-#undef VALUE_PROF_KIND
-/* VALUE_PROF_KIND end */
-
-#undef COVMAP_V2_OR_V3
-#ifdef COVMAP_V2
-#define COVMAP_V2_OR_V3
-#endif
-#ifdef COVMAP_V3
-#define COVMAP_V2_OR_V3
-#endif
-
-/* COVMAP_FUNC_RECORD start */
-/* Definition of member fields of the function record structure in coverage
- * map.
- */
-#ifndef COVMAP_FUNC_RECORD
-#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-#ifdef COVMAP_V1
-COVMAP_FUNC_RECORD(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), \
-                   NamePtr, llvm::ConstantExpr::getBitCast(NamePtr, \
-                   llvm::Type::getInt8PtrTy(Ctx)))
-COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
-                   llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), \
-                   NameValue.size()))
-#endif
-#ifdef COVMAP_V2_OR_V3
-COVMAP_FUNC_RECORD(const int64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
-                   llvm::ConstantInt::get( \
-                     llvm::Type::getInt64Ty(Ctx), NameHash))
-#endif
-COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \
-                   llvm::ConstantInt::get( \
-                     llvm::Type::getInt32Ty(Ctx), CoverageMapping.size()))
-COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
-                   llvm::ConstantInt::get( \
-                     llvm::Type::getInt64Ty(Ctx), FuncHash))
-#ifdef COVMAP_V3
-COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FilenamesRef, \
-                   llvm::ConstantInt::get( \
-                     llvm::Type::getInt64Ty(Ctx), FilenamesRef))
-COVMAP_FUNC_RECORD(const char, \
-                   llvm::ArrayType::get(llvm::Type::getInt8Ty(Ctx), \
-                                        CoverageMapping.size()), \
-                   CoverageMapping,
-                   llvm::ConstantDataArray::getRaw( \
-                     CoverageMapping, CoverageMapping.size(), \
-                     llvm::Type::getInt8Ty(Ctx)))
-#endif
-#undef COVMAP_FUNC_RECORD
-/* COVMAP_FUNC_RECORD end.  */
-
-/* COVMAP_HEADER start */
-/* Definition of member fields of coverage map header.
- */
-#ifndef COVMAP_HEADER
-#define COVMAP_HEADER(Type, LLVMType, Name, Initializer)
-#else
-#define INSTR_PROF_DATA_DEFINED
-#endif
-COVMAP_HEADER(uint32_t, Int32Ty, NRecords, \
-              llvm::ConstantInt::get(Int32Ty, NRecords))
-COVMAP_HEADER(uint32_t, Int32Ty, FilenamesSize, \
-              llvm::ConstantInt::get(Int32Ty, FilenamesSize))
-COVMAP_HEADER(uint32_t, Int32Ty, CoverageSize, \
-              llvm::ConstantInt::get(Int32Ty, CoverageMappingSize))
-COVMAP_HEADER(uint32_t, Int32Ty, Version, \
-              llvm::ConstantInt::get(Int32Ty, CovMapVersion::CurrentVersion))
-#undef COVMAP_HEADER
-/* COVMAP_HEADER end.  */
-
-
-#ifdef INSTR_PROF_SECT_ENTRY
-#define INSTR_PROF_DATA_DEFINED
-INSTR_PROF_SECT_ENTRY(IPSK_data, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON), \
-                      INSTR_PROF_DATA_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_cnts, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON), \
-                      INSTR_PROF_CNTS_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_name, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON), \
-                      INSTR_PROF_NAME_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_vals, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON), \
-                      INSTR_PROF_VALS_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_vnodes, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON), \
-                      INSTR_PROF_VNODES_COFF, "__DATA,")
-INSTR_PROF_SECT_ENTRY(IPSK_covmap, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON), \
-                      INSTR_PROF_COVMAP_COFF, "__LLVM_COV,")
-INSTR_PROF_SECT_ENTRY(IPSK_covfun, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON), \
-                      INSTR_PROF_COVFUN_COFF, "__LLVM_COV,")
-INSTR_PROF_SECT_ENTRY(IPSK_orderfile, \
-                      INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON), \
-                      INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COFF), "__DATA,")
-
-#undef INSTR_PROF_SECT_ENTRY
-#endif
-
-
-#ifdef INSTR_PROF_VALUE_PROF_DATA
-#define INSTR_PROF_DATA_DEFINED
-
-#define INSTR_PROF_MAX_NUM_VAL_PER_SITE 255
-/*!
- * This is the header of the data structure that defines the on-disk
- * layout of the value profile data of a particular kind for one function.
- */
-typedef struct ValueProfRecord {
-  /* The kind of the value profile record. */
-  uint32_t Kind;
-  /*
-   * The number of value profile sites. It is guaranteed to be non-zero;
-   * otherwise the record for this kind won't be emitted.
-   */
-  uint32_t NumValueSites;
-  /*
-   * The first element of the array that stores the number of profiled
-   * values for each value site. The size of the array is NumValueSites.
-   * Since NumValueSites is greater than zero, there is at least one
-   * element in the array.
-   */
-  uint8_t SiteCountArray[1];
-
-  /*
-   * This fake declaration is for documentation purposes only.
-   * It aligns the start of the next field to an 8-byte boundary.
-  uint8_t Padding[X];
-   */
-
-  /* The array of value profile data. The size of the array is the sum
-   * of all elements in SiteCountArray[].
-  InstrProfValueData ValueData[];
-   */
-
-#ifdef __cplusplus
-  /*!
-   * Return the number of value sites.
-   */
-  uint32_t getNumValueSites() const { return NumValueSites; }
-  /*!
-   * Read data from this record and save it to Record.
-   */
-  void deserializeTo(InstrProfRecord &Record,
-                     InstrProfSymtab *SymTab);
-  /*
-   * In-place byte swap:
-   * Do byte swap for this instance. \c Old is the original order before
-   * the swap, and \c New is the new byte order.
-   */
-  void swapBytes(support::endianness Old, support::endianness New);
-#endif
-} ValueProfRecord;
-
-/*!
- * Per-function header/control data structure for value profiling
- * data in indexed format.
- */
-typedef struct ValueProfData {
-  /*
-   * Total size in bytes including this field. It must be a multiple
-   * of sizeof(uint64_t).
-   */
-  uint32_t TotalSize;
-  /*
-   * The number of value profile kinds that have value profile data.
-   * In this implementation, a value profile kind is considered to
-   * have profile data if the number of value profile sites for the
-   * kind is not zero. More aggressively, the implementation can
-   * choose to check the actual data value: if none of the value sites
-   * has any profiled values, the kind can be skipped.
-   */
-  uint32_t NumValueKinds;
-
-  /*
-   * What follows is a sequence of variable-length records. The prefix/header
-   * of each record is defined by the ValueProfRecord type. The number of
-   * records is NumValueKinds.
-   * ValueProfRecord Record_1;
-   * ...
-   * ValueProfRecord Record_N;
-   */
-
-#ifdef __cplusplus
-  /*!
-   * Return the total size in bytes of the on-disk value profile data
-   * given the data stored in Record.
-   */
-  static uint32_t getSize(const InstrProfRecord &Record);
-  /*!
-   * Return a pointer to \c ValueProfData instance ready to be streamed.
-   */
-  static std::unique_ptr<ValueProfData>
-  serializeFrom(const InstrProfRecord &Record);
-  /*!
-   * Check the integrity of the record.
-   */
-  Error checkIntegrity();
-  /*!
-   * Return a pointer to a \c ValueProfData instance ready to be read.
-   * All data in the instance are properly byte swapped. The input
-   * data is assumed to be in little endian order.
-   */
-  static Expected<std::unique_ptr<ValueProfData>>
-  getValueProfData(const unsigned char *SrcBuffer,
-                   const unsigned char *const SrcBufferEnd,
-                   support::endianness SrcDataEndianness);
-  /*!
-   * Swap byte order from \c Endianness order to host byte order.
-   */
-  void swapBytesToHost(support::endianness Endianness);
-  /*!
-   * Swap byte order from host byte order to \c Endianness order.
-   */
-  void swapBytesFromHost(support::endianness Endianness);
-  /*!
-   * Return the total size of \c ValueProfData.
-   */
-  uint32_t getSize() const { return TotalSize; }
-  /*!
-   * Read value profile data from this instance and save it to \c Record.
-   */
-  void deserializeTo(InstrProfRecord &Record,
-                     InstrProfSymtab *SymTab);
-  void operator delete(void *ptr) { ::operator delete(ptr); }
-#endif
-} ValueProfData;
-
-/*
- * The closure is designed to abstract away two types of value profile data:
- * - InstrProfRecord which is the primary data structure used to
- *   represent profile data in host tools (reader, writer, and profile-use)
- * - value profile runtime data structure suitable to be used by C
- *   runtime library.
- *
- * Both sources of data need to serialize to a disk or memory buffer in the
- * common ValueProfData format. The abstraction allows compiler-rt's raw
- * profile writer to share the same format and code with the indexed profile
- * writer.
- *
- * For documentation of the member methods below, refer to the corresponding
- * methods in class InstrProfRecord.
- */
-typedef struct ValueProfRecordClosure {
-  const void *Record;
-  uint32_t (*GetNumValueKinds)(const void *Record);
-  uint32_t (*GetNumValueSites)(const void *Record, uint32_t VKind);
-  uint32_t (*GetNumValueData)(const void *Record, uint32_t VKind);
-  uint32_t (*GetNumValueDataForSite)(const void *R, uint32_t VK, uint32_t S);
-
-  /*
-   * After extracting the value profile data from the value profile record,
-   * this method is used to map the in-memory value to the on-disk value. If
-   * the method is null, the value is written out untranslated.
-   */
-  uint64_t (*RemapValueData)(uint32_t, uint64_t Value);
-  void (*GetValueForSite)(const void *R, InstrProfValueData *Dst, uint32_t K,
-                          uint32_t S);
-  ValueProfData *(*AllocValueProfData)(size_t TotalSizeInBytes);
-} ValueProfRecordClosure;
-
-INSTR_PROF_VISIBILITY ValueProfRecord *
-getFirstValueProfRecord(ValueProfData *VPD);
-INSTR_PROF_VISIBILITY ValueProfRecord *
-getValueProfRecordNext(ValueProfRecord *VPR);
-INSTR_PROF_VISIBILITY InstrProfValueData *
-getValueProfRecordValueData(ValueProfRecord *VPR);
-INSTR_PROF_VISIBILITY uint32_t
-getValueProfRecordHeaderSize(uint32_t NumValueSites);
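-
-/* Example (sketch): walking every record in a ValueProfData blob with the
- * accessors declared above (VPD assumed valid and in host byte order):
- *
- *   ValueProfRecord *VR = getFirstValueProfRecord(VPD);
- *   uint32_t K;
- *   for (K = 0; K < VPD->NumValueKinds; K++) {
- *     InstrProfValueData *VD = getValueProfRecordValueData(VR);
- *     // consume VR->Kind, VR->NumValueSites, and the VD array here
- *     VR = getValueProfRecordNext(VR);
- *   }
- */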
-
-#undef INSTR_PROF_VALUE_PROF_DATA
-#endif  /* INSTR_PROF_VALUE_PROF_DATA */
-
-
-#ifdef INSTR_PROF_COMMON_API_IMPL
-#define INSTR_PROF_DATA_DEFINED
-#ifdef __cplusplus
-#define INSTR_PROF_INLINE inline
-#define INSTR_PROF_NULLPTR nullptr
-#else
-#define INSTR_PROF_INLINE
-#define INSTR_PROF_NULLPTR NULL
-#endif
-
-#ifndef offsetof
-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
-#endif
-
-/*!
- * Return the \c ValueProfRecord header size including the
- * padding bytes.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-uint32_t getValueProfRecordHeaderSize(uint32_t NumValueSites) {
-  uint32_t Size = offsetof(ValueProfRecord, SiteCountArray) +
-                  sizeof(uint8_t) * NumValueSites;
-  /* Round the size up to a multiple of 8 bytes. */
-  Size = (Size + 7) & ~7;
-  return Size;
-}
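-
-/* For instance, with NumValueSites == 3 the unpadded header size is
- * offsetof(ValueProfRecord, SiteCountArray) + 3 == 8 + 3 == 11 bytes,
- * which rounds up to 16. */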
-
-/*!
- * Return the total size of the value profile record including the
- * header and the value data.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-uint32_t getValueProfRecordSize(uint32_t NumValueSites,
-                                uint32_t NumValueData) {
-  return getValueProfRecordHeaderSize(NumValueSites) +
-         sizeof(InstrProfValueData) * NumValueData;
-}
-
-/*!
- * Return the pointer to the start of value data array.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-InstrProfValueData *getValueProfRecordValueData(ValueProfRecord *This) {
-  return (InstrProfValueData *)((char *)This + getValueProfRecordHeaderSize(
-                                                   This->NumValueSites));
-}
-
-/*!
- * Return the total number of value data for \c This record.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-uint32_t getValueProfRecordNumValueData(ValueProfRecord *This) {
-  uint32_t NumValueData = 0;
-  uint32_t I;
-  for (I = 0; I < This->NumValueSites; I++)
-    NumValueData += This->SiteCountArray[I];
-  return NumValueData;
-}
-
-/*!
- * Use this method to advance from \c This to the next \c ValueProfRecord.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-ValueProfRecord *getValueProfRecordNext(ValueProfRecord *This) {
-  uint32_t NumValueData = getValueProfRecordNumValueData(This);
-  return (ValueProfRecord *)((char *)This +
-                             getValueProfRecordSize(This->NumValueSites,
-                                                    NumValueData));
-}
-
-/*!
- * Return the first \c ValueProfRecord instance.
- */
-INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
-ValueProfRecord *getFirstValueProfRecord(ValueProfData *This) {
-  return (ValueProfRecord *)((char *)This + sizeof(ValueProfData));
-}
-
-/* Closure based interfaces.  */
-
-/*!
- * Return the total size in bytes of the on-disk value profile data
- * given the data stored in Record.
- */
-INSTR_PROF_VISIBILITY uint32_t
-getValueProfDataSize(ValueProfRecordClosure *Closure) {
-  uint32_t Kind;
-  uint32_t TotalSize = sizeof(ValueProfData);
-  const void *Record = Closure->Record;
-
-  for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
-    uint32_t NumValueSites = Closure->GetNumValueSites(Record, Kind);
-    if (!NumValueSites)
-      continue;
-    TotalSize += getValueProfRecordSize(NumValueSites,
-                                        Closure->GetNumValueData(Record, Kind));
-  }
-  return TotalSize;
-}
-
-/*!
- * Extract value profile data of a function for the profile kind \c ValueKind
- * from the \c Closure and serialize the data into \c This record instance.
- */
-INSTR_PROF_VISIBILITY void
-serializeValueProfRecordFrom(ValueProfRecord *This,
-                             ValueProfRecordClosure *Closure,
-                             uint32_t ValueKind, uint32_t NumValueSites) {
-  uint32_t S;
-  const void *Record = Closure->Record;
-  This->Kind = ValueKind;
-  This->NumValueSites = NumValueSites;
-  InstrProfValueData *DstVD = getValueProfRecordValueData(This);
-
-  for (S = 0; S < NumValueSites; S++) {
-    uint32_t ND = Closure->GetNumValueDataForSite(Record, ValueKind, S);
-    This->SiteCountArray[S] = ND;
-    Closure->GetValueForSite(Record, DstVD, ValueKind, S);
-    DstVD += ND;
-  }
-}
-
-/*!
- * Extract value profile data of a function from the \c Closure and serialize
- * it into \c DstData if that is not NULL, otherwise into heap memory
- * allocated by the \c Closure's allocator method. If \c DstData is not
- * NULL, the caller is expected to have set its TotalSize field.
- */
-INSTR_PROF_VISIBILITY ValueProfData *
-serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
-                           ValueProfData *DstData) {
-  uint32_t Kind;
-  uint32_t TotalSize =
-      DstData ? DstData->TotalSize : getValueProfDataSize(Closure);
-
-  ValueProfData *VPD =
-      DstData ? DstData : Closure->AllocValueProfData(TotalSize);
-
-  VPD->TotalSize = TotalSize;
-  VPD->NumValueKinds = Closure->GetNumValueKinds(Closure->Record);
-  ValueProfRecord *VR = getFirstValueProfRecord(VPD);
-  for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
-    uint32_t NumValueSites = Closure->GetNumValueSites(Closure->Record, Kind);
-    if (!NumValueSites)
-      continue;
-    serializeValueProfRecordFrom(VR, Closure, Kind, NumValueSites);
-    VR = getValueProfRecordNext(VR);
-  }
-  return VPD;
-}
-
-#undef INSTR_PROF_COMMON_API_IMPL
-#endif /* INSTR_PROF_COMMON_API_IMPL */
-
-/*============================================================================*/
-
-#ifndef INSTR_PROF_DATA_DEFINED
-
-#ifndef INSTR_PROF_DATA_INC
-#define INSTR_PROF_DATA_INC
-
-/* Helper macros.  */
-#define INSTR_PROF_SIMPLE_QUOTE(x) #x
-#define INSTR_PROF_QUOTE(x) INSTR_PROF_SIMPLE_QUOTE(x)
-#define INSTR_PROF_SIMPLE_CONCAT(x,y) x ## y
-#define INSTR_PROF_CONCAT(x,y) INSTR_PROF_SIMPLE_CONCAT(x,y)
-
-/* Magic number to detect file format and endianness.
- * Use 255 at one end, since no UTF-8 file can use that character.  Avoid 0,
- * so that utilities, like strings, don't grab it as a string.  129 is also
- * invalid UTF-8, and high enough to be interesting.
- * Use "lprofr" in the centre to stand for "LLVM Profile Raw", or "lprofR"
- * for 32-bit platforms.
- */
-#define INSTR_PROF_RAW_MAGIC_64 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
-       (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
-        (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129
-#define INSTR_PROF_RAW_MAGIC_32 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
-       (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 |  \
-        (uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129
-
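-/* For example, on a little-endian host the 64-bit magic is laid out in the
- * file as the byte sequence 0x81 'r' 'f' 'o' 'r' 'p' 'l' 0xff, so a raw
- * profile starts with byte 129 and its magic ends with byte 255. */
-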
-/* Raw profile format version (start from 1). */
-#define INSTR_PROF_RAW_VERSION 5
-/* Indexed profile format version (start from 1). */
-#define INSTR_PROF_INDEX_VERSION 6
-/* Coverage mapping format version (start from 0). */
-#define INSTR_PROF_COVMAP_VERSION 3
-
-/* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
- * version for other profile variants. We set the lowest bit of the upper 8
- * bits (i.e. bit 56) to 1 to indicate that this is an IR-level instrumentation
- * generated profile, and to 0 for a Clang FE generated profile.
- * A 1 in bit 57 indicates that the profile contains context-sensitive records.
- */
-#define VARIANT_MASKS_ALL 0xff00000000000000ULL
-#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
-#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
-#define VARIANT_MASK_CSIR_PROF (0x1ULL << 57)
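-/* For example, a reader can decode a version word V as:
- *   GET_VERSION(V)               - raw format version (e.g. 5)
- *   (V & VARIANT_MASK_IR_PROF)   - non-zero for an IR-level instrumentation
- *                                  generated profile
- *   (V & VARIANT_MASK_CSIR_PROF) - non-zero if context-sensitive records
- *                                  are present
- */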
-#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
-#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
-
-/* The variable that holds the name of the profile data
- * specified via command line. */
-#define INSTR_PROF_PROFILE_NAME_VAR __llvm_profile_filename
-
-/* section name strings common to all targets other
-   than WIN32 */
-#define INSTR_PROF_DATA_COMMON __llvm_prf_data
-#define INSTR_PROF_NAME_COMMON __llvm_prf_names
-#define INSTR_PROF_CNTS_COMMON __llvm_prf_cnts
-#define INSTR_PROF_VALS_COMMON __llvm_prf_vals
-#define INSTR_PROF_VNODES_COMMON __llvm_prf_vnds
-#define INSTR_PROF_COVMAP_COMMON __llvm_covmap
-#define INSTR_PROF_COVFUN_COMMON __llvm_covfun
-#define INSTR_PROF_ORDERFILE_COMMON __llvm_orderfile
-/* Windows section names. Because these section names contain dollar characters,
- * they must be quoted.
- */
-#define INSTR_PROF_DATA_COFF ".lprfd$M"
-#define INSTR_PROF_NAME_COFF ".lprfn$M"
-#define INSTR_PROF_CNTS_COFF ".lprfc$M"
-#define INSTR_PROF_VALS_COFF ".lprfv$M"
-#define INSTR_PROF_VNODES_COFF ".lprfnd$M"
-#define INSTR_PROF_COVMAP_COFF ".lcovmap$M"
-#define INSTR_PROF_COVFUN_COFF ".lcovfun$M"
-#define INSTR_PROF_ORDERFILE_COFF ".lorderfile$M"
-
-#ifdef _WIN32
-/* Runtime section names and name strings.  */
-#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_DATA_COFF
-#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_NAME_COFF
-#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_CNTS_COFF
-/* Array of pointers. Each pointer points to a list
- * of value nodes associated with one value site.
- */
-#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_VALS_COFF
-/* Value profile nodes section. */
-#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COFF
-#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COFF
-#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_COVFUN_COFF
-#define INSTR_PROF_ORDERFILE_SECT_NAME INSTR_PROF_ORDERFILE_COFF
-#else
-/* Runtime section names and name strings.  */
-#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON)
-#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON)
-#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON)
-/* Array of pointers. Each pointer points to a list
- * of value nodes associated with one value site.
- */
-#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON)
-/* Value profile nodes section. */
-#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON)
-#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON)
-#define INSTR_PROF_COVFUN_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVFUN_COMMON)
-/* Order file instrumentation. */
-#define INSTR_PROF_ORDERFILE_SECT_NAME                                         \
-  INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON)
-#endif
-
-#define INSTR_PROF_ORDERFILE_BUFFER_NAME _llvm_order_file_buffer
-#define INSTR_PROF_ORDERFILE_BUFFER_NAME_STR                                   \
-  INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_NAME)
-#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME _llvm_order_file_buffer_idx
-#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME_STR                               \
-  INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME)
-
-/* Macros to define start/stop section symbol for a given
- * section on Linux. For instance
- * INSTR_PROF_SECT_START(INSTR_PROF_DATA_SECT_NAME) will
- * expand to __start___llvm_prf_data
- */
-#define INSTR_PROF_SECT_START(Sect) \
-        INSTR_PROF_CONCAT(__start_,Sect)
-#define INSTR_PROF_SECT_STOP(Sect) \
-        INSTR_PROF_CONCAT(__stop_,Sect)
-
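-/* Example (sketch, Linux/ELF): the linker defines these boundary symbols for
- * each retained section, so a runtime can locate the counters, e.g.:
- *
- *   extern uint64_t INSTR_PROF_SECT_START(INSTR_PROF_CNTS_COMMON);
- *   extern uint64_t INSTR_PROF_SECT_STOP(INSTR_PROF_CNTS_COMMON);
- *   size_t NumCounters =
- *       &INSTR_PROF_SECT_STOP(INSTR_PROF_CNTS_COMMON) -
- *       &INSTR_PROF_SECT_START(INSTR_PROF_CNTS_COMMON);
- */
-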
-/* Value Profiling API linkage name.  */
-#define INSTR_PROF_VALUE_PROF_FUNC __llvm_profile_instrument_target
-#define INSTR_PROF_VALUE_PROF_FUNC_STR \
-        INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_FUNC)
-#define INSTR_PROF_VALUE_RANGE_PROF_FUNC __llvm_profile_instrument_range
-#define INSTR_PROF_VALUE_RANGE_PROF_FUNC_STR \
-        INSTR_PROF_QUOTE(INSTR_PROF_VALUE_RANGE_PROF_FUNC)
-
-/* InstrProfile per-function control data alignment.  */
-#define INSTR_PROF_DATA_ALIGNMENT 8
-
-/* The data structure that represents a tracked value by the
- * value profiler.
- */
-typedef struct InstrProfValueData {
-  /* Profiled value. */
-  uint64_t Value;
-  /* Number of times the value appears in the training run. */
-  uint64_t Count;
-} InstrProfValueData;
-
-#endif /* INSTR_PROF_DATA_INC */
-
-#ifndef INSTR_ORDER_FILE_INC
-/* The maximum # of functions: 128*1024 (the buffer size will be 128*4 KB). */
-#define INSTR_ORDER_FILE_BUFFER_SIZE 131072
-#define INSTR_ORDER_FILE_BUFFER_BITS 17
-#define INSTR_ORDER_FILE_BUFFER_MASK 0x1ffff
-#endif /* INSTR_ORDER_FILE_INC */
-#else
-#undef INSTR_PROF_DATA_DEFINED
-#endif
-
-#undef COVMAP_V2_OR_V3
diff --git a/linux-x86/lib64/clang/11.0.5/include/sanitizer/asan_interface.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/asan_interface.h
deleted file mode 100644
index 6af93aa..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/sanitizer/asan_interface.h
+++ /dev/null
@@ -1,326 +0,0 @@
-//===-- sanitizer/asan_interface.h ------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of AddressSanitizer (ASan).
-//
-// Public interface header.
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_ASAN_INTERFACE_H
-#define SANITIZER_ASAN_INTERFACE_H
-
-#include <sanitizer/common_interface_defs.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-/// Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
-///
-/// This memory must be previously allocated by your program. Instrumented
-/// code is forbidden from accessing addresses in this region until it is
-/// unpoisoned. This function is not guaranteed to poison the entire region -
-/// it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
-/// alignment restrictions.
-///
-/// \note This function is not thread-safe: no two threads may poison or
-/// unpoison memory in the same memory region simultaneously.
-///
-/// \param addr Start of memory region.
-/// \param size Size of memory region.
-void __asan_poison_memory_region(void const volatile *addr, size_t size);
-
-/// Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
-///
-/// This memory must be previously allocated by your program. Accessing
-/// addresses in this region is allowed until this region is poisoned again.
-/// This function could unpoison a super-region of <c>[addr, addr+size)</c> due
-/// to ASan alignment restrictions.
-///
-/// \note This function is not thread-safe: no two threads may
-/// poison or unpoison memory in the same memory region simultaneously.
-///
-/// \param addr Start of memory region.
-/// \param size Size of memory region.
-void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
-
-// Macros provided for convenience.
-#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
-/// Marks a memory region as unaddressable.
-///
-/// \note Macro provided for convenience; defined as a no-op if ASan is not
-/// enabled.
-///
-/// \param addr Start of memory region.
-/// \param size Size of memory region.
-#define ASAN_POISON_MEMORY_REGION(addr, size) \
-  __asan_poison_memory_region((addr), (size))
-
-/// Marks a memory region as addressable.
-///
-/// \note Macro provided for convenience; defined as a no-op if ASan is not
-/// enabled.
-///
-/// \param addr Start of memory region.
-/// \param size Size of memory region.
-#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
-  __asan_unpoison_memory_region((addr), (size))
-#else
-#define ASAN_POISON_MEMORY_REGION(addr, size) \
-  ((void)(addr), (void)(size))
-#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
-  ((void)(addr), (void)(size))
-#endif
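-
-// Example (sketch): a custom allocator can use the macros above to guard a
-// redzone; the sizes and pointer are illustrative:
-//
-//   char *p = (char *)malloc(1024);
-//   ASAN_POISON_MEMORY_REGION(p + 512, 512);   // tail is now unaddressable
-//   ... use p[0..511] only ...
-//   ASAN_UNPOISON_MEMORY_REGION(p + 512, 512); // before reuse or free(p)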
-
-/// Checks if an address is poisoned.
-///
-/// Returns 1 if <c><i>addr</i></c> is poisoned (that is, 1-byte read/write
-/// access to this address would result in an error report from ASan).
-/// Otherwise returns 0.
-///
-/// \param addr Address to check.
-///
-/// \retval 1 Address is poisoned.
-/// \retval 0 Address is not poisoned.
-int __asan_address_is_poisoned(void const volatile *addr);
-
-/// Checks if a region is poisoned.
-///
-/// If at least one byte in <c>[beg, beg+size)</c> is poisoned, returns the
-/// address of the first such byte. Otherwise returns 0.
-///
-/// \param beg Start of memory region.
-/// \param size Size of memory region.
-/// \returns Address of first poisoned byte.
-void *__asan_region_is_poisoned(void *beg, size_t size);
-
-/// Describes an address (useful for calling from the debugger).
-///
-/// Prints the description of <c><i>addr</i></c>.
-///
-/// \param addr Address to describe.
-void __asan_describe_address(void *addr);
-
-/// Checks if an error has been or is being reported (useful for calling from
-/// the debugger to get information about an ASan error).
-///
-/// Returns 1 if an error has been (or is being) reported. Otherwise returns 0.
-///
-/// \returns 1 if an error has been (or is being) reported. Otherwise returns
-/// 0.
-int __asan_report_present(void);
-
-/// Gets the PC (program counter) register value of an ASan error (useful for
-/// calling from the debugger).
-///
-/// Returns PC if an error has been (or is being) reported.
-/// Otherwise returns 0.
-///
-/// \returns PC value.
-void *__asan_get_report_pc(void);
-
-/// Gets the BP (base pointer) register value of an ASan error (useful for
-/// calling from the debugger).
-///
-/// Returns BP if an error has been (or is being) reported.
-/// Otherwise returns 0.
-///
-/// \returns BP value.
-void *__asan_get_report_bp(void);
-
-/// Gets the SP (stack pointer) register value of an ASan error (useful for
-/// calling from the debugger).
-///
-/// If an error has been (or is being) reported, returns SP.
-/// Otherwise returns 0.
-///
-/// \returns SP value.
-void *__asan_get_report_sp(void);
-
-/// Gets the address of the report buffer of an ASan error (useful for calling
-/// from the debugger).
-///
-/// Returns the address of the report buffer if an error has been (or is being)
-/// reported. Otherwise returns 0.
-///
-/// \returns Address of report buffer.
-void *__asan_get_report_address(void);
-
-/// Gets access type of an ASan error (useful for calling from the debugger).
-///
-/// Returns access type (read or write) if an error has been (or is being)
-/// reported. Otherwise returns 0.
-///
-/// \returns Access type (0 = read, 1 = write).
-int __asan_get_report_access_type(void);
-
-/// Gets access size of an ASan error (useful for calling from the debugger).
-///
-/// Returns access size if an error has been (or is being) reported. Otherwise
-/// returns 0.
-///
-/// \returns Access size in bytes.
-size_t __asan_get_report_access_size(void);
-
-/// Gets the bug description of an ASan error (useful for calling from a
-/// debugger).
-///
-/// \returns Returns a bug description if an error has been (or is being)
-/// reported - for example, "heap-use-after-free". Otherwise returns an empty
-/// string.
-const char *__asan_get_report_description(void);
-
-/// Gets information about a pointer (useful for calling from the debugger).
-///
-/// Returns the category of the given pointer as a constant string.
-/// Possible return values are <c>global</c>, <c>stack</c>, <c>stack-fake</c>,
-/// <c>heap</c>, <c>heap-invalid</c>, <c>shadow-low</c>, <c>shadow-gap</c>,
-/// <c>shadow-high</c>, and <c>unknown</c>.
-///
-/// If the return value is <c>global</c> or <c>stack</c>, tries to also return
-/// the variable name, address, and size. If the return value is <c>heap</c>,
-/// tries to return the chunk address and size. <c><i>name</i></c> should point
-/// to an allocated buffer of size <c><i>name_size</i></c>.
-///
-/// \param addr Address to locate.
-/// \param name Buffer to store the variable's name.
-/// \param name_size Size in bytes of the variable's name buffer.
-/// \param region_address [out] Address of the region.
-/// \param region_size [out] Size of the region in bytes.
-///
-/// \returns Returns the category of the given pointer as a constant string.
-const char *__asan_locate_address(void *addr, char *name, size_t name_size,
-                                  void **region_address, size_t *region_size);
-
-/// Gets the allocation stack trace and thread ID for a heap address (useful
-/// for calling from the debugger).
-///
-/// Stores up to <c><i>size</i></c> frames in <c><i>trace</i></c>. Returns
-/// the number of stored frames or 0 on error.
-///
-/// \param addr A heap address.
-/// \param trace A buffer to store the stack trace.
-/// \param size Size of the trace buffer in frames.
-/// \param thread_id [out] The thread ID of the address.
-///
-/// \returns Returns the number of stored frames or 0 on error.
-size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
-                              int *thread_id);
-
-/// Gets the free stack trace and thread ID for a heap address (useful for
-/// calling from the debugger).
-///
-/// Stores up to <c><i>size</i></c> frames in <c><i>trace</i></c>. Returns
-/// the number of stored frames or 0 on error.
-///
-/// \param addr A heap address.
-/// \param trace A buffer to store the stack trace.
-/// \param size Size of the trace buffer in frames.
-/// \param thread_id [out] The thread ID of the address.
-///
-/// \returns Returns the number of stored frames or 0 on error.
-size_t __asan_get_free_stack(void *addr, void **trace, size_t size,
-                             int *thread_id);
-
-/// Gets the current shadow memory mapping (useful for calling from the
-/// debugger).
-///
-/// \param shadow_scale [out] Shadow scale value.
-/// \param shadow_offset [out] Offset value.
-void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset);
-
-/// This is an internal function that is called to report an error. However,
-/// it is still a part of the interface because you might want to set a
-/// breakpoint on this function in the debugger.
-///
-/// \param pc <c><i>pc</i></c> value of the ASan error.
-/// \param bp <c><i>bp</i></c> value of the ASan error.
-/// \param sp <c><i>sp</i></c> value of the ASan error.
-/// \param addr Address of the ASan error.
-/// \param is_write True if the error is a write error; false otherwise.
-/// \param access_size Size of the memory access of the ASan error.
-void __asan_report_error(void *pc, void *bp, void *sp,
-                         void *addr, int is_write, size_t access_size);
-
-// Deprecated. Call __sanitizer_set_death_callback instead.
-void __asan_set_death_callback(void (*callback)(void));
-
-/// Sets the callback function to be called during ASan error reporting.
-///
-/// The callback provides a string pointer to the report.
-///
-/// \param callback User-provided function.
-void __asan_set_error_report_callback(void (*callback)(const char *));
-
-/// User-provided callback on ASan errors.
-///
-/// You can provide a function that will be called immediately when ASan
-/// detects an error. This is useful in cases when ASan detects an error but
-/// your program crashes before the ASan report is printed.
-void __asan_on_error(void);
-
-/// Prints accumulated statistics to <c>stderr</c> (useful for calling from the
-/// debugger).
-void __asan_print_accumulated_stats(void);
-
-/// User-provided default option settings.
-///
-/// You can provide your own implementation of this function to return a string
-/// containing ASan runtime options (for example,
-/// <c>verbosity=1:halt_on_error=0</c>).
-///
-/// \returns Default options string.
-const char* __asan_default_options(void);
-
-// The following two functions facilitate garbage collection in presence of
-// ASan's fake stack.
-
-/// Gets an opaque handler to the current thread's fake stack.
-///
-/// Returns an opaque handler to be used by
-/// <c>__asan_addr_is_in_fake_stack()</c>. Returns NULL if the current thread
-/// does not have a fake stack.
-///
-/// \returns An opaque handler to the fake stack or NULL.
-void *__asan_get_current_fake_stack(void);
-
-/// Checks if an address belongs to a given fake stack.
-///
-/// If <c><i>fake_stack</i></c> is non-NULL and <c><i>addr</i></c> belongs to a
-/// fake frame in <c><i>fake_stack</i></c>, returns the address of the real
-/// stack that corresponds to the fake frame and sets <c><i>beg</i></c> and
-/// <c><i>end</i></c> to the boundaries of this fake frame. Otherwise returns
-/// NULL and does not touch <c><i>beg</i></c> and <c><i>end</i></c>.
-///
-/// If <c><i>beg</i></c> or <c><i>end</i></c> are NULL, they are not touched.
-///
-/// \note This function can be called from a thread other than the owner of
-/// <c><i>fake_stack</i></c>, but the owner thread needs to be alive.
-///
-/// \param fake_stack An opaque handler to a fake stack.
-/// \param addr Address to test.
-/// \param beg [out] Beginning of fake frame.
-/// \param end [out] End of fake frame.
-/// \returns Stack address or NULL.
-void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
-                                   void **end);
-
-/// Performs shadow memory cleanup of the current thread's stack before a
-/// function marked with the <c>[[noreturn]]</c> attribute is called.
-///
-/// To avoid false positives on the stack, it must be called before no-return
-/// functions like <c>_exit()</c> and <c>execl()</c>.
-void __asan_handle_no_return(void);
-
-/// Update the allocation stack trace for the given allocation to the current
-/// stack trace. Returns 1 if successful, 0 if not.
-int __asan_update_allocation_context(void* addr);
-
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  // SANITIZER_ASAN_INTERFACE_H
diff --git a/linux-x86/lib64/clang/11.0.5/include/sanitizer/common_interface_defs.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/common_interface_defs.h
deleted file mode 100644
index f979c6a..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/sanitizer/common_interface_defs.h
+++ /dev/null
@@ -1,354 +0,0 @@
-//===-- sanitizer/common_interface_defs.h -----------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// Common part of the public sanitizer interface.
-//===----------------------------------------------------------------------===//
-
-#ifndef SANITIZER_COMMON_INTERFACE_DEFS_H
-#define SANITIZER_COMMON_INTERFACE_DEFS_H
-
-#include <stddef.h>
-#include <stdint.h>
-
-// GCC does not understand __has_feature.
-#if !defined(__has_feature)
-#define __has_feature(x) 0
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-// Arguments for __sanitizer_sandbox_on_notify() below.
-typedef struct {
-  // Enable sandbox support in sanitizer coverage.
-  int coverage_sandboxed;
-  // File descriptor to write coverage data to. If -1 is passed, a file will
-  // be pre-opened by __sanitizer_sandbox_on_notify(). This field has no
-  // effect if coverage_sandboxed == 0.
-  intptr_t coverage_fd;
-  // If non-zero, split the coverage data into well-formed blocks. This is
-  // useful when coverage_fd is a socket descriptor. Each block will contain
-  // a header, allowing data from multiple processes to be sent over the same
-  // socket.
-  unsigned int coverage_max_block_size;
-} __sanitizer_sandbox_arguments;
-
-// Tell the tools to write their reports to "path.<pid>" instead of stderr.
-void __sanitizer_set_report_path(const char *path);
-// Tell the tools to write their reports to the provided file descriptor
-// (casted to void *).
-void __sanitizer_set_report_fd(void *fd);
-
-// Notify the tools that the sandbox is going to be turned on. The reserved
-// parameter will be used in the future to hold a structure with functions
-// that the tools may call to bypass the sandbox.
-void __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
-
-// This function is called by the tool when it has just finished reporting
-// an error. 'error_summary' is a one-line string that summarizes
-// the error message. This function can be overridden by the client.
-void __sanitizer_report_error_summary(const char *error_summary);
-
-// Some of the sanitizers (for example ASan/TSan) could miss bugs that happen
-// in unaligned loads/stores. To find such bugs reliably, you need to replace
-// plain unaligned loads/stores with these calls.
-
-/// Loads a 16-bit unaligned value.
-///
-/// \param p Pointer to unaligned memory.
-///
-/// \returns Loaded value.
-uint16_t __sanitizer_unaligned_load16(const void *p);
-
-/// Loads a 32-bit unaligned value.
-///
-/// \param p Pointer to unaligned memory.
-///
-/// \returns Loaded value.
-uint32_t __sanitizer_unaligned_load32(const void *p);
-
-/// Loads a 64-bit unaligned value.
-///
-/// \param p Pointer to unaligned memory.
-///
-/// \returns Loaded value.
-uint64_t __sanitizer_unaligned_load64(const void *p);
-
-/// Stores a 16-bit unaligned value.
-///
-/// \param p Pointer to unaligned memory.
-/// \param x 16-bit value to store.
-void __sanitizer_unaligned_store16(void *p, uint16_t x);
-
-/// Stores a 32-bit unaligned value.
-///
-/// \param p Pointer to unaligned memory.
-/// \param x 32-bit value to store.
-void __sanitizer_unaligned_store32(void *p, uint32_t x);
-
-/// Stores a 64-bit unaligned value.
-///
-/// \param p Pointer to unaligned memory.
-/// \param x 64-bit value to store.
-void __sanitizer_unaligned_store64(void *p, uint64_t x);
-
-// Returns 1 on the first call and 0 thereafter.  Called by the tool
-// to ensure only one report is printed when multiple errors occur
-// simultaneously.
-int __sanitizer_acquire_crash_state();
-
-/// Annotates the current state of a contiguous container, such as
-/// <c>std::vector</c>, <c>std::string</c>, or similar.
-///
-/// A contiguous container is a container that keeps all of its elements
-/// in a contiguous region of memory. The container owns the region of memory
-/// <c>[beg, end)</c>; the memory <c>[beg, mid)</c> is used to store the
-/// current elements, and the memory <c>[mid, end)</c> is reserved for future
-/// elements (<c>beg <= mid <= end</c>). For example, in
-/// <c>std::vector<> v</c>:
-///
-/// \code
-///   beg = &v[0];
-///   end = beg + v.capacity() * sizeof(v[0]);
-///   mid = beg + v.size()     * sizeof(v[0]);
-/// \endcode
-///
-/// This annotation tells the Sanitizer tool about the current state of the
-/// container so that the tool can report errors when memory from
-/// <c>[mid, end)</c> is accessed. Insert this annotation into methods like
-/// <c>push_back()</c> or <c>pop_back()</c>. Supply the old and new values of
-/// <c>mid</c> (<c><i>old_mid</i></c> and <c><i>new_mid</i></c>). In the initial
-/// state <c>mid == end</c>, and that should also be the final state when the
-/// container is destroyed or when the container reallocates its storage.
-///
-/// For ASan, <c><i>beg</i></c> should be 8-aligned and <c><i>end</i></c>
-/// should be either 8-aligned or it should point to the end of a separate
-/// heap-, stack-, or global-allocated buffer. So the following example will
-/// not work:
-///
-/// \code
-///   int64_t x[2]; // 16 bytes, 8-aligned
-///   char *beg = (char *)&x[0];
-///   char *end = beg + 12; // Not 8-aligned, not the end of the buffer
-/// \endcode
-///
-/// The following, however, will work:
-/// \code
-///   int32_t x[3]; // 12 bytes, but 8-aligned under ASan.
-///   char *beg = (char*)&x[0];
-///   char *end = beg + 12; // Not 8-aligned, but is the end of the buffer
-/// \endcode
-///
-/// \note  Use this function with caution and do not use for anything other
-/// than vector-like classes.
-///
-/// \param beg Beginning of memory region.
-/// \param end End of memory region.
-/// \param old_mid Old middle of memory region.
-/// \param new_mid New middle of memory region.
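-///
-/// For example, growing the container by one element could be annotated as
-/// follows (sketch, using the layout described above):
-///
-/// \code
-///   char *old_mid = beg + v.size() * sizeof(v[0]);
-///   char *new_mid = old_mid + sizeof(v[0]);
-///   __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid);
-/// \endcode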
-void __sanitizer_annotate_contiguous_container(const void *beg,
-                                               const void *end,
-                                               const void *old_mid,
-                                               const void *new_mid);
-
-/// Returns true if the contiguous container <c>[beg, end)</c> is properly
-/// poisoned.
-///
-/// Proper poisoning could occur, for example, with
-/// <c>__sanitizer_annotate_contiguous_container()</c>, that is, if
-/// <c>[beg, mid)</c> is addressable and <c>[mid, end)</c> is unaddressable.
-/// Full verification requires O(<c>end - beg</c>) time; this function tries
-/// to avoid such complexity by touching only parts of the container around
-/// <c><i>beg</i></c>, <c><i>mid</i></c>, and <c><i>end</i></c>.
-///
-/// \param beg Beginning of memory region.
-/// \param mid Middle of memory region.
-/// \param end End of memory region.
-///
-/// \returns True if the contiguous container <c>[beg, end)</c> is properly
-///  poisoned.
-int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
-                                            const void *end);
-
-/// Similar to <c>__sanitizer_verify_contiguous_container()</c> but also
-/// returns the address of the first improperly poisoned byte.
-///
-/// Returns NULL if the area is poisoned properly.
-///
-/// \param beg Beginning of memory region.
-/// \param mid Middle of memory region.
-/// \param end End of memory region.
-///
-/// \returns The bad address or NULL.
-const void *__sanitizer_contiguous_container_find_bad_address(const void *beg,
-                                                              const void *mid,
-                                                              const void *end);
-
-/// Prints the stack trace leading to this call (useful for calling from the
-/// debugger).
-void __sanitizer_print_stack_trace(void);
-
-// Symbolizes the supplied 'pc' using the format string 'fmt'.
-// Outputs at most 'out_buf_size' bytes into 'out_buf'.
-// If 'out_buf' is not empty, the output is zero or more non-empty C strings
-// followed by a single empty C string. Multiple strings can be returned if PC
-// corresponds to an inlined function. Inlined frames are printed in order
-// from "most-inlined" to "least-inlined", so the last frame is the
-// non-inlined function.
-// Inlined frames can be removed with 'symbolize_inline_frames=0'.
-// The format syntax is described in
-// lib/sanitizer_common/sanitizer_stacktrace_printer.h.
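-// For example (sketch; "%p %F %L" is assumed to print the pc, the function
-// name, and the file:line location per the header referenced above):
-//   char buf[256];
-//   __sanitizer_symbolize_pc(pc, "%p %F %L", buf, sizeof(buf));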
-void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf,
-                              size_t out_buf_size);
-// Same as __sanitizer_symbolize_pc, but for data section (i.e. globals).
-void __sanitizer_symbolize_global(void *data_ptr, const char *fmt,
-                                  char *out_buf, size_t out_buf_size);
-
-/// Sets the callback to be called immediately before death on error.
-///
-/// Passing 0 will unset the callback.
-///
-/// \param callback User-provided callback.
-void __sanitizer_set_death_callback(void (*callback)(void));
-
-
-// Interceptor hooks.
-// Whenever a libc function interceptor is called, it checks if the
-// corresponding weak hook is defined, and calls it if it is indeed defined.
-// The primary use-case is data-flow-guided fuzzing, where the fuzzer needs
-// to know what is being passed to libc functions (for example memcmp).
-// FIXME: implement more hooks.
-
-/// Interceptor hook for <c>memcmp()</c>.
-///
-/// \param called_pc PC (program counter) address of the original call.
-/// \param s1 Pointer to block of memory.
-/// \param s2 Pointer to block of memory.
-/// \param n Number of bytes to compare.
-/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
-                                  const void *s2, size_t n, int result);
-
-/// Interceptor hook for <c>strncmp()</c>.
-///
-/// \param called_pc PC (program counter) address of the original call.
-/// \param s1 Pointer to block of memory.
-/// \param s2 Pointer to block of memory.
-/// \param n Number of bytes to compare.
-/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
-                                  const char *s2, size_t n, int result);
-
-/// Interceptor hook for <c>strncasecmp()</c>.
-///
-/// \param called_pc PC (program counter) address of the original call.
-/// \param s1 Pointer to block of memory.
-/// \param s2 Pointer to block of memory.
-/// \param n Number of bytes to compare.
-/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1,
-                                       const char *s2, size_t n, int result);
-
-/// Interceptor hook for <c>strcmp()</c>.
-///
-/// \param called_pc PC (program counter) address of the original call.
-/// \param s1 Pointer to block of memory.
-/// \param s2 Pointer to block of memory.
-/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_strcmp(void *called_pc, const char *s1,
-                                  const char *s2, int result);
-
-/// Interceptor hook for <c>strcasecmp()</c>.
-///
-/// \param called_pc PC (program counter) address of the original call.
-/// \param s1 Pointer to block of memory.
-/// \param s2 Pointer to block of memory.
-/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1,
-                                      const char *s2, int result);
-
-/// Interceptor hook for <c>strstr()</c>.
-///
-/// \param called_pc PC (program counter) address of the original call.
-/// \param s1 Pointer to block of memory.
-/// \param s2 Pointer to block of memory.
-/// \param result Value returned by the intercepted function.
-void __sanitizer_weak_hook_strstr(void *called_pc, const char *s1,
-                                  const char *s2, char *result);
-
-void __sanitizer_weak_hook_strcasestr(void *called_pc, const char *s1,
-                                      const char *s2, char *result);
-
-void __sanitizer_weak_hook_memmem(void *called_pc,
-                                  const void *s1, size_t len1,
-                                  const void *s2, size_t len2, void *result);
-
-// Prints stack traces for all live heap allocations ordered by total
-// allocation size until top_percent of total live heap is shown. top_percent
-// should be between 1 and 100. At most max_number_of_contexts contexts
-// (stack traces) are printed.
-// Experimental feature currently available only with ASan on Linux/x86_64.
-void __sanitizer_print_memory_profile(size_t top_percent,
-                                      size_t max_number_of_contexts);
-
-/// Notify ASan that a fiber switch has started (required only if implementing
-/// your own fiber library).
-///
-/// Before switching to a different stack, you must call
-/// <c>__sanitizer_start_switch_fiber()</c> with a pointer to the bottom of the
-/// destination stack and with its size. When code starts running on the new
-/// stack, it must call <c>__sanitizer_finish_switch_fiber()</c> to finalize
-/// the switch. The <c>__sanitizer_start_switch_fiber()</c> function takes a
-/// <c>void**</c> pointer argument to store the current fake stack if there is
-/// one (it is necessary when the runtime option
-/// <c>detect_stack_use_after_return</c> is enabled).
-///
-/// When restoring a stack, this <c>void**</c> pointer must be given to the
-/// <c>__sanitizer_finish_switch_fiber()</c> function. In most cases, this
-/// pointer can be stored on the stack immediately before switching. When
-/// leaving a fiber definitely, NULL must be passed as the first argument to
-/// the <c>__sanitizer_start_switch_fiber()</c> function so that the fake stack
-/// is destroyed. If your program does not need stack use-after-return
-/// detection, you can always pass NULL to these two functions.
-///
-/// \note The fake stack mechanism is disabled during fiber switch, so if a
-/// signal callback runs during the switch, it will not benefit from stack
-/// use-after-return detection.
-///
-/// \param fake_stack_save [out] Fake stack save location.
-/// \param bottom Bottom address of stack.
-/// \param size Size of stack in bytes.
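-///
-/// A typical switch looks like this (sketch; the context-switch primitive and
-/// the stack bookkeeping belong to the fiber library):
-///
-/// \code
-///   void *fake_stack_save = NULL;
-///   __sanitizer_start_switch_fiber(&fake_stack_save, next_bottom, next_size);
-///   swapcontext(&this_ctx, &next_ctx);
-///   // running again on this fiber's stack:
-///   __sanitizer_finish_switch_fiber(fake_stack_save, &old_bottom, &old_size);
-/// \endcode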
-void __sanitizer_start_switch_fiber(void **fake_stack_save,
-                                    const void *bottom, size_t size);
-
-/// Notify ASan that a fiber switch has completed (required only if
-/// implementing your own fiber library).
-///
-/// When code starts running on the new stack, it must call
-/// <c>__sanitizer_finish_switch_fiber()</c> to finalize
-/// the switch. For usage details, see the description of
-/// <c>__sanitizer_start_switch_fiber()</c>.
-///
-/// \param fake_stack_save Fake stack save location.
-/// \param bottom_old [out] Bottom address of old stack.
-/// \param size_old [out] Size of old stack in bytes.
-void __sanitizer_finish_switch_fiber(void *fake_stack_save,
-                                     const void **bottom_old,
-                                     size_t *size_old);
-
-// Get full module name and calculate pc offset within it.
-// Returns 1 if pc belongs to some module, 0 if module was not found.
-int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path,
-                                             size_t module_path_len,
-                                             void **pc_offset);
-
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  // SANITIZER_COMMON_INTERFACE_DEFS_H
diff --git a/linux-x86/lib64/clang/11.0.5/include/sanitizer/dfsan_interface.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/dfsan_interface.h
deleted file mode 100644
index 81546e5..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/sanitizer/dfsan_interface.h
+++ /dev/null
@@ -1,121 +0,0 @@
-//===-- dfsan_interface.h -------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of DataFlowSanitizer.
-//
-// Public interface header.
-//===----------------------------------------------------------------------===//
-#ifndef DFSAN_INTERFACE_H
-#define DFSAN_INTERFACE_H
-
-#include <stddef.h>
-#include <stdint.h>
-#include <sanitizer/common_interface_defs.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef uint16_t dfsan_label;
-
-/// Stores information associated with a specific label identifier.  A label
-/// may be a base label created using dfsan_create_label, with associated
-/// text description and user data, or an automatically created union label,
-/// which represents the union of two label identifiers (which may themselves
-/// be base or union labels).
-struct dfsan_label_info {
-  // Fields for union labels, set to 0 for base labels.
-  dfsan_label l1;
-  dfsan_label l2;
-
-  // Fields for base labels.
-  const char *desc;
-  void *userdata;
-};
-
-/// Signature of the callback argument to dfsan_set_write_callback().
-typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
-
-/// Computes the union of \c l1 and \c l2, possibly creating a union label in
-/// the process.
-dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
-
-/// Creates and returns a base label with the given description and user data.
-dfsan_label dfsan_create_label(const char *desc, void *userdata);
-
-/// Sets the label for each address in [addr,addr+size) to \c label.
-void dfsan_set_label(dfsan_label label, void *addr, size_t size);
-
-/// Sets the label for each address in [addr,addr+size) to the union of the
-/// current label for that address and \c label.
-void dfsan_add_label(dfsan_label label, void *addr, size_t size);
-
-/// Retrieves the label associated with the given data.
-///
-/// The type of 'data' is arbitrary.  The function accepts a value of any type,
-/// which can be truncated or extended (implicitly or explicitly) as necessary.
-/// The truncation/extension operations will preserve the label of the original
-/// value.
-dfsan_label dfsan_get_label(long data);
-
-/// Retrieves the label associated with the data at the given address.
-dfsan_label dfsan_read_label(const void *addr, size_t size);
-
-/// Retrieves a pointer to the dfsan_label_info struct for the given label.
-const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label);
-
-/// Returns whether the given label \c label contains the label \c elem.
-int dfsan_has_label(dfsan_label label, dfsan_label elem);
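-
-/// Example (sketch) of tracking a label through a computation:
-/// \code
-///   int i = 1;
-///   dfsan_label i_label = dfsan_create_label("i", 0);
-///   dfsan_set_label(i_label, &i, sizeof(i));
-///   int j = i + 100;  // the label propagates through the addition
-///   assert(dfsan_has_label(dfsan_get_label(j), i_label));
-/// \endcode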
-
-/// If the given label \c label contains a label with the description \c desc,
-/// returns that label; otherwise returns 0.
-dfsan_label dfsan_has_label_with_desc(dfsan_label label, const char *desc);
-
-/// Returns the number of labels allocated.
-size_t dfsan_get_label_count(void);
-
-/// Flushes the DFSan shadow, i.e. forgets about all labels currently associated
-/// with the application memory. This works only if no other
-/// threads are executing DFSan-instrumented code concurrently.
-/// Use this call to restart taint tracking within the same process.
-void dfsan_flush(void);
-
-/// Sets a callback to be invoked on calls to write().  The callback is invoked
-/// before the write is done.  The write is not guaranteed to succeed when the
-/// callback executes.  Pass in NULL to remove any callback.
-void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
-
-/// Writes the labels currently used by the program to the given file
-/// descriptor. The lines of the output have the following format:
-///
-/// <label> <parent label 1> <parent label 2> <label description if any>
-void dfsan_dump_labels(int fd);
-
-/// Interceptor hooks.
-/// Whenever one of dfsan's custom functions is called, the corresponding
-/// hook is called if it is non-null. The hooks should be defined by the user.
-/// The primary use case is taint-guided fuzzing, where the fuzzer
-/// needs to see the parameters of the function and the labels.
-/// FIXME: implement more hooks.
-void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2,
-                            size_t n, dfsan_label s1_label,
-                            dfsan_label s2_label, dfsan_label n_label);
-void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
-                             size_t n, dfsan_label s1_label,
-                             dfsan_label s2_label, dfsan_label n_label);
-#ifdef __cplusplus
-}  // extern "C"
-
-template <typename T>
-void dfsan_set_label(dfsan_label label, T &data) { // NOLINT
-  dfsan_set_label(label, (void *)&data, sizeof(T));
-}
-
-#endif
-
-#endif  // DFSAN_INTERFACE_H
diff --git a/linux-x86/lib64/clang/11.0.5/include/sanitizer/hwasan_interface.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/hwasan_interface.h
deleted file mode 100644
index 4c9ad13..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/sanitizer/hwasan_interface.h
+++ /dev/null
@@ -1,96 +0,0 @@
-//===-- sanitizer/hwasan_interface.h ----------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of HWAddressSanitizer.
-//
-// Public interface header.
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_HWASAN_INTERFACE_H
-#define SANITIZER_HWASAN_INTERFACE_H
-
-#include <sanitizer/common_interface_defs.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-  // Libc hook for program startup in statically linked executables.
-  // Initializes enough of the runtime to run instrumented code. This function
-  // should only be called in statically linked executables because it modifies
-  // the GOT, which won't work in regular binaries because RELRO will already
-  // have been applied by the time the function is called. This also means that
-  // the function should be called before libc applies RELRO.
-  // Does not call libc unless there is an error.
-  // Can be called multiple times.
-  void __hwasan_init_static(void);
-
-  // This function may optionally be provided by the user and should return
-  // a string containing HWASan runtime options. See asan_flags.h for details.
-  const char* __hwasan_default_options(void);
-
-  // Enable/disable tagging of heap allocations made by the allocator.
-  void __hwasan_enable_allocator_tagging(void);
-  void __hwasan_disable_allocator_tagging(void);
-
-  // Mark a region of memory with the given tag. Both the address and the size
-  // need to be 16-byte aligned.
-  void __hwasan_tag_memory(const volatile void *p, unsigned char tag,
-                           size_t size);
-
-  // Set the pointer tag. The previous tag is lost.
-  void *__hwasan_tag_pointer(const volatile void *p, unsigned char tag);
-
-  // Set the memory tags for the region between the current SP and the given
-  // destination address to zero. This is meant to annotate longjmp and other
-  // non-local jumps.
-  // This function needs to know the (almost) exact destination frame address;
-  // clearing shadow for the entire thread stack like __asan_handle_no_return
-  // does would cause false reports.
-  void __hwasan_handle_longjmp(const void *sp_dst);
-
-  // Set the memory tags for the part of the current thread stack below sp_dst
-  // to zero. Call this in vfork() before returning in the parent process.
-  void __hwasan_handle_vfork(const void *sp_dst);
-
-  // Libc hook for thread creation. Should be called in the child thread before
-  // any instrumented code.
-  void __hwasan_thread_enter(void);
-
-  // Libc hook for thread destruction. No instrumented code should run after
-  // this call.
-  void __hwasan_thread_exit(void);
-
-  // Print shadow and origin for the memory range to stderr in a human-readable
-  // format.
-  void __hwasan_print_shadow(const volatile void *x, size_t size);
-
-  // Print a one-line report about the memory usage of the current process.
-  void __hwasan_print_memory_usage(void);
-
-  // Returns the offset of the first byte in the memory range that cannot be
-  // accessed through the pointer in x, or -1 if the whole range is good.
-  intptr_t __hwasan_test_shadow(const volatile void *x, size_t size);
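-
-  // A minimal sketch of manual tagging with the entry points above (the
-  // buffer and the tag value 0x2a are illustrative):
-  //
-  //   __attribute__((aligned(16))) static char buf[32];
-  //   __hwasan_tag_memory(buf, 0x2a, sizeof(buf));
-  //   char *p = (char *)__hwasan_tag_pointer(buf, 0x2a);
-  //   p[0] = 1;                                            // tags match: OK
-  //   assert(__hwasan_test_shadow(p, sizeof(buf)) == -1);  // whole range good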
-
-  int __sanitizer_posix_memalign(void **memptr, size_t alignment, size_t size);
-  void * __sanitizer_memalign(size_t alignment, size_t size);
-  void * __sanitizer_aligned_alloc(size_t alignment, size_t size);
-  void * __sanitizer___libc_memalign(size_t alignment, size_t size);
-  void * __sanitizer_valloc(size_t size);
-  void * __sanitizer_pvalloc(size_t size);
-  void __sanitizer_free(void *ptr);
-  void __sanitizer_cfree(void *ptr);
-  size_t __sanitizer_malloc_usable_size(const void *ptr);
-  struct mallinfo __sanitizer_mallinfo();
-  int __sanitizer_mallopt(int cmd, int value);
-  void __sanitizer_malloc_stats(void);
-  void * __sanitizer_calloc(size_t nmemb, size_t size);
-  void * __sanitizer_realloc(void *ptr, size_t size);
-  void * __sanitizer_reallocarray(void *ptr, size_t nmemb, size_t size);
-  void * __sanitizer_malloc(size_t size);
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif  // SANITIZER_HWASAN_INTERFACE_H
diff --git a/linux-x86/lib64/clang/11.0.5/include/sanitizer/msan_interface.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/msan_interface.h
deleted file mode 100644
index d40c556..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/sanitizer/msan_interface.h
+++ /dev/null
@@ -1,121 +0,0 @@
-//===-- msan_interface.h --------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of MemorySanitizer.
-//
-// Public interface header.
-//===----------------------------------------------------------------------===//
-#ifndef MSAN_INTERFACE_H
-#define MSAN_INTERFACE_H
-
-#include <sanitizer/common_interface_defs.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-  /* Set raw origin for the memory range. */
-  void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
-
-  /* Get raw origin for an address. */
-  uint32_t __msan_get_origin(const volatile void *a);
-
-  /* Test that this_id is a descendant of prev_id (or they are simply equal).
-   * "descendant" here means they are part of the same chain, created with
-   * __msan_chain_origin. */
-  int __msan_origin_is_descendant_or_same(uint32_t this_id, uint32_t prev_id);
-
-  /* Returns non-zero if tracking origins. */
-  int __msan_get_track_origins(void);
-
-  /* Returns the origin id of the latest UMR in the calling thread. */
-  uint32_t __msan_get_umr_origin(void);
-
-  /* Make memory region fully initialized (without changing its contents). */
-  void __msan_unpoison(const volatile void *a, size_t size);
-
-  /* Make a null-terminated string fully initialized (without changing its
-     contents). */
-  void __msan_unpoison_string(const volatile char *a);
-
-  /* Make first n parameters of the next function call fully initialized. */
-  void __msan_unpoison_param(size_t n);
-
-  /* Make memory region fully uninitialized (without changing its contents).
-     This is a legacy interface that does not update origin information. Use
-     __msan_allocated_memory() instead. */
-  void __msan_poison(const volatile void *a, size_t size);
-
-  /* Make memory region partially uninitialized (without changing its contents).
-   */
-  void __msan_partial_poison(const volatile void *data, void *shadow,
-                             size_t size);
-
-  /* Returns the offset of the first (at least partially) poisoned byte in the
-     memory range, or -1 if the whole range is good. */
-  intptr_t __msan_test_shadow(const volatile void *x, size_t size);
-
-  /* Checks that memory range is fully initialized, and reports an error if it
-   * is not. */
-  void __msan_check_mem_is_initialized(const volatile void *x, size_t size);
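-
-  /* A minimal sketch combining the poisoning and checking entry points above;
-     the buffer name is illustrative:
-       char buf[8];
-       __msan_poison(buf, sizeof(buf));                    // uninitialized
-       assert(__msan_test_shadow(buf, sizeof(buf)) == 0);  // first bad byte
-       __msan_unpoison(buf, sizeof(buf));                  // initialized
-       __msan_check_mem_is_initialized(buf, sizeof(buf));  // no report */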
-
-  /* For testing:
-     __msan_set_expect_umr(1);
-     ... some buggy code ...
-     __msan_set_expect_umr(0);
-     The last line will verify that a UMR happened. */
-  void __msan_set_expect_umr(int expect_umr);
-
-  /* Change the value of the keep_going flag. A non-zero value means don't
-     terminate program execution when an error is detected. This will not
-     affect errors in modules that were compiled without the corresponding
-     compiler flag. */
-  void __msan_set_keep_going(int keep_going);
-
-  /* Print shadow and origin for the memory range to stderr in a human-readable
-     format. */
-  void __msan_print_shadow(const volatile void *x, size_t size);
-
-  /* Print shadow for the memory range to stderr in a minimalistic
-     human-readable format. */
-  void __msan_dump_shadow(const volatile void *x, size_t size);
-
-  /* Returns true if running under a dynamic tool (DynamoRIO-based). */
-  int __msan_has_dynamic_component(void);
-
-  /* Tell MSan about newly allocated memory (e.g., from a custom allocator).
-     Memory will be marked uninitialized, with origin at the call site. */
-  void __msan_allocated_memory(const volatile void* data, size_t size);
-
-  /* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
-  void __sanitizer_dtor_callback(const volatile void* data, size_t size);
-
-  /* This function may optionally be provided by the user and should return
-     a string containing MSan runtime options. See msan_flags.h for details. */
-  const char* __msan_default_options(void);
-
-  /* Deprecated. Call __sanitizer_set_death_callback instead. */
-  void __msan_set_death_callback(void (*callback)(void));
-
-  /* Update the shadow for an application copy of size bytes from src to dst.
-     Src and dst are application addresses. This function does not copy the
-     actual application memory; it only updates the shadow and origin for such
-     a copy. Source and destination regions can overlap. */
-  void __msan_copy_shadow(const volatile void *dst, const volatile void *src,
-                          size_t size);
-
-  /* Disables uninitialized memory checks in interceptors. */
-  void __msan_scoped_disable_interceptor_checks(void);
-
-  /* Re-enables uninitialized memory checks in interceptors after a previous
-     call to __msan_scoped_disable_interceptor_checks. */
-  void __msan_scoped_enable_interceptor_checks(void);
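-
-  /* Typical usage of the scoped pair above (dst, src and n are illustrative):
-       __msan_scoped_disable_interceptor_checks();
-       memcpy(dst, src, n);   // intercepted call, checks suppressed here
-       __msan_scoped_enable_interceptor_checks(); */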
-
-#ifdef __cplusplus
-}  // extern "C"
-#endif
-
-#endif
diff --git a/linux-x86/lib64/clang/11.0.5/include/sanitizer/netbsd_syscall_hooks.h b/linux-x86/lib64/clang/11.0.5/include/sanitizer/netbsd_syscall_hooks.h
deleted file mode 100644
index 370da0e..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/sanitizer/netbsd_syscall_hooks.h
+++ /dev/null
@@ -1,4812 +0,0 @@
-//===-- netbsd_syscall_hooks.h --------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of the public sanitizer interface.
-//
-// System call handlers.
-//
-// Interface methods declared in this header implement pre- and post-syscall
-// actions for the active sanitizer.
-// Usage:
-//   __sanitizer_syscall_pre_getfoo(...args...);
-//   long long res = syscall(SYS_getfoo, ...args...);
-//   __sanitizer_syscall_post_getfoo(res, ...args...);
-//
-// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
-//
-// Generated with: generate_netbsd_syscalls.awk
-// Generated date: 2019-12-24
-// Generated from: syscalls.master,v 1.296 2019/09/22 22:59:39 christos Exp
-//
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_NETBSD_SYSCALL_HOOKS_H
-#define SANITIZER_NETBSD_SYSCALL_HOOKS_H
-
-#define __sanitizer_syscall_pre_syscall(code, arg0, arg1, arg2, arg3, arg4,    \
-                                        arg5, arg6, arg7)                      \
-  __sanitizer_syscall_pre_impl_syscall(                                        \
-      (long long)(code), (long long)(arg0), (long long)(arg1),                 \
-      (long long)(arg2), (long long)(arg3), (long long)(arg4),                 \
-      (long long)(arg5), (long long)(arg6), (long long)(arg7))
-#define __sanitizer_syscall_post_syscall(res, code, arg0, arg1, arg2, arg3,    \
-                                         arg4, arg5, arg6, arg7)               \
-  __sanitizer_syscall_post_impl_syscall(                                       \
-      res, (long long)(code), (long long)(arg0), (long long)(arg1),            \
-      (long long)(arg2), (long long)(arg3), (long long)(arg4),                 \
-      (long long)(arg5), (long long)(arg6), (long long)(arg7))
-#define __sanitizer_syscall_pre_exit(rval)                                     \
-  __sanitizer_syscall_pre_impl_exit((long long)(rval))
-#define __sanitizer_syscall_post_exit(res, rval)                               \
-  __sanitizer_syscall_post_impl_exit(res, (long long)(rval))
-#define __sanitizer_syscall_pre_fork() __sanitizer_syscall_pre_impl_fork()
-#define __sanitizer_syscall_post_fork(res)                                     \
-  __sanitizer_syscall_post_impl_fork(res)
-#define __sanitizer_syscall_pre_read(fd, buf, nbyte)                           \
-  __sanitizer_syscall_pre_impl_read((long long)(fd), (long long)(buf),         \
-                                    (long long)(nbyte))
-#define __sanitizer_syscall_post_read(res, fd, buf, nbyte)                     \
-  __sanitizer_syscall_post_impl_read(res, (long long)(fd), (long long)(buf),   \
-                                     (long long)(nbyte))
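-/* For example, instantiating the usage pattern from the header comment for
-   read(2) with the two macros above (fd, buf and nbyte are illustrative):
-     __sanitizer_syscall_pre_read(fd, buf, nbyte);
-     long long res = syscall(SYS_read, fd, buf, nbyte);
-     __sanitizer_syscall_post_read(res, fd, buf, nbyte);
-*/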
-#define __sanitizer_syscall_pre_write(fd, buf, nbyte)                          \
-  __sanitizer_syscall_pre_impl_write((long long)(fd), (long long)(buf),        \
-                                     (long long)(nbyte))
-#define __sanitizer_syscall_post_write(res, fd, buf, nbyte)                    \
-  __sanitizer_syscall_post_impl_write(res, (long long)(fd), (long long)(buf),  \
-                                      (long long)(nbyte))
-#define __sanitizer_syscall_pre_open(path, flags, mode)                        \
-  __sanitizer_syscall_pre_impl_open((long long)(path), (long long)(flags),     \
-                                    (long long)(mode))
-#define __sanitizer_syscall_post_open(res, path, flags, mode)                  \
-  __sanitizer_syscall_post_impl_open(res, (long long)(path),                   \
-                                     (long long)(flags), (long long)(mode))
-#define __sanitizer_syscall_pre_close(fd)                                      \
-  __sanitizer_syscall_pre_impl_close((long long)(fd))
-#define __sanitizer_syscall_post_close(res, fd)                                \
-  __sanitizer_syscall_post_impl_close(res, (long long)(fd))
-#define __sanitizer_syscall_pre_compat_50_wait4(pid, status, options, rusage)  \
-  __sanitizer_syscall_pre_impl_compat_50_wait4(                                \
-      (long long)(pid), (long long)(status), (long long)(options),             \
-      (long long)(rusage))
-#define __sanitizer_syscall_post_compat_50_wait4(res, pid, status, options,    \
-                                                 rusage)                       \
-  __sanitizer_syscall_post_impl_compat_50_wait4(                               \
-      res, (long long)(pid), (long long)(status), (long long)(options),        \
-      (long long)(rusage))
-#define __sanitizer_syscall_pre_compat_43_ocreat(path, mode)                   \
-  __sanitizer_syscall_pre_impl_compat_43_ocreat((long long)(path),             \
-                                                (long long)(mode))
-#define __sanitizer_syscall_post_compat_43_ocreat(res, path, mode)             \
-  __sanitizer_syscall_post_impl_compat_43_ocreat(res, (long long)(path),       \
-                                                 (long long)(mode))
-#define __sanitizer_syscall_pre_link(path, link)                               \
-  __sanitizer_syscall_pre_impl_link((long long)(path), (long long)(link))
-#define __sanitizer_syscall_post_link(res, path, link)                         \
-  __sanitizer_syscall_post_impl_link(res, (long long)(path), (long long)(link))
-#define __sanitizer_syscall_pre_unlink(path)                                   \
-  __sanitizer_syscall_pre_impl_unlink((long long)(path))
-#define __sanitizer_syscall_post_unlink(res, path)                             \
-  __sanitizer_syscall_post_impl_unlink(res, (long long)(path))
-/* syscall 11 has been skipped */
-#define __sanitizer_syscall_pre_chdir(path)                                    \
-  __sanitizer_syscall_pre_impl_chdir((long long)(path))
-#define __sanitizer_syscall_post_chdir(res, path)                              \
-  __sanitizer_syscall_post_impl_chdir(res, (long long)(path))
-#define __sanitizer_syscall_pre_fchdir(fd)                                     \
-  __sanitizer_syscall_pre_impl_fchdir((long long)(fd))
-#define __sanitizer_syscall_post_fchdir(res, fd)                               \
-  __sanitizer_syscall_post_impl_fchdir(res, (long long)(fd))
-#define __sanitizer_syscall_pre_compat_50_mknod(path, mode, dev)               \
-  __sanitizer_syscall_pre_impl_compat_50_mknod(                                \
-      (long long)(path), (long long)(mode), (long long)(dev))
-#define __sanitizer_syscall_post_compat_50_mknod(res, path, mode, dev)         \
-  __sanitizer_syscall_post_impl_compat_50_mknod(                               \
-      res, (long long)(path), (long long)(mode), (long long)(dev))
-#define __sanitizer_syscall_pre_chmod(path, mode)                              \
-  __sanitizer_syscall_pre_impl_chmod((long long)(path), (long long)(mode))
-#define __sanitizer_syscall_post_chmod(res, path, mode)                        \
-  __sanitizer_syscall_post_impl_chmod(res, (long long)(path), (long long)(mode))
-#define __sanitizer_syscall_pre_chown(path, uid, gid)                          \
-  __sanitizer_syscall_pre_impl_chown((long long)(path), (long long)(uid),      \
-                                     (long long)(gid))
-#define __sanitizer_syscall_post_chown(res, path, uid, gid)                    \
-  __sanitizer_syscall_post_impl_chown(res, (long long)(path),                  \
-                                      (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_pre_break(nsize)                                   \
-  __sanitizer_syscall_pre_impl_break((long long)(nsize))
-#define __sanitizer_syscall_post_break(res, nsize)                             \
-  __sanitizer_syscall_post_impl_break(res, (long long)(nsize))
-#define __sanitizer_syscall_pre_compat_20_getfsstat(buf, bufsize, flags)       \
-  __sanitizer_syscall_pre_impl_compat_20_getfsstat(                            \
-      (long long)(buf), (long long)(bufsize), (long long)(flags))
-#define __sanitizer_syscall_post_compat_20_getfsstat(res, buf, bufsize, flags) \
-  __sanitizer_syscall_post_impl_compat_20_getfsstat(                           \
-      res, (long long)(buf), (long long)(bufsize), (long long)(flags))
-#define __sanitizer_syscall_pre_compat_43_olseek(fd, offset, whence)           \
-  __sanitizer_syscall_pre_impl_compat_43_olseek(                               \
-      (long long)(fd), (long long)(offset), (long long)(whence))
-#define __sanitizer_syscall_post_compat_43_olseek(res, fd, offset, whence)     \
-  __sanitizer_syscall_post_impl_compat_43_olseek(                              \
-      res, (long long)(fd), (long long)(offset), (long long)(whence))
-#define __sanitizer_syscall_pre_getpid() __sanitizer_syscall_pre_impl_getpid()
-#define __sanitizer_syscall_post_getpid(res)                                   \
-  __sanitizer_syscall_post_impl_getpid(res)
-#define __sanitizer_syscall_pre_compat_40_mount(type, path, flags, data)       \
-  __sanitizer_syscall_pre_impl_compat_40_mount(                                \
-      (long long)(type), (long long)(path), (long long)(flags),                \
-      (long long)(data))
-#define __sanitizer_syscall_post_compat_40_mount(res, type, path, flags, data) \
-  __sanitizer_syscall_post_impl_compat_40_mount(                               \
-      res, (long long)(type), (long long)(path), (long long)(flags),           \
-      (long long)(data))
-#define __sanitizer_syscall_pre_unmount(path, flags)                           \
-  __sanitizer_syscall_pre_impl_unmount((long long)(path), (long long)(flags))
-#define __sanitizer_syscall_post_unmount(res, path, flags)                     \
-  __sanitizer_syscall_post_impl_unmount(res, (long long)(path),                \
-                                        (long long)(flags))
-#define __sanitizer_syscall_pre_setuid(uid)                                    \
-  __sanitizer_syscall_pre_impl_setuid((long long)(uid))
-#define __sanitizer_syscall_post_setuid(res, uid)                              \
-  __sanitizer_syscall_post_impl_setuid(res, (long long)(uid))
-#define __sanitizer_syscall_pre_getuid() __sanitizer_syscall_pre_impl_getuid()
-#define __sanitizer_syscall_post_getuid(res)                                   \
-  __sanitizer_syscall_post_impl_getuid(res)
-#define __sanitizer_syscall_pre_geteuid() __sanitizer_syscall_pre_impl_geteuid()
-#define __sanitizer_syscall_post_geteuid(res)                                  \
-  __sanitizer_syscall_post_impl_geteuid(res)
-#define __sanitizer_syscall_pre_ptrace(req, pid, addr, data)                   \
-  __sanitizer_syscall_pre_impl_ptrace((long long)(req), (long long)(pid),      \
-                                      (long long)(addr), (long long)(data))
-#define __sanitizer_syscall_post_ptrace(res, req, pid, addr, data)             \
-  __sanitizer_syscall_post_impl_ptrace(res, (long long)(req),                  \
-                                       (long long)(pid), (long long)(addr),    \
-                                       (long long)(data))
-#define __sanitizer_syscall_pre_recvmsg(s, msg, flags)                         \
-  __sanitizer_syscall_pre_impl_recvmsg((long long)(s), (long long)(msg),       \
-                                       (long long)(flags))
-#define __sanitizer_syscall_post_recvmsg(res, s, msg, flags)                   \
-  __sanitizer_syscall_post_impl_recvmsg(res, (long long)(s), (long long)(msg), \
-                                        (long long)(flags))
-#define __sanitizer_syscall_pre_sendmsg(s, msg, flags)                         \
-  __sanitizer_syscall_pre_impl_sendmsg((long long)(s), (long long)(msg),       \
-                                       (long long)(flags))
-#define __sanitizer_syscall_post_sendmsg(res, s, msg, flags)                   \
-  __sanitizer_syscall_post_impl_sendmsg(res, (long long)(s), (long long)(msg), \
-                                        (long long)(flags))
-#define __sanitizer_syscall_pre_recvfrom(s, buf, len, flags, from,             \
-                                         fromlenaddr)                          \
-  __sanitizer_syscall_pre_impl_recvfrom(                                       \
-      (long long)(s), (long long)(buf), (long long)(len), (long long)(flags),  \
-      (long long)(from), (long long)(fromlenaddr))
-#define __sanitizer_syscall_post_recvfrom(res, s, buf, len, flags, from,       \
-                                          fromlenaddr)                         \
-  __sanitizer_syscall_post_impl_recvfrom(                                      \
-      res, (long long)(s), (long long)(buf), (long long)(len),                 \
-      (long long)(flags), (long long)(from), (long long)(fromlenaddr))
-#define __sanitizer_syscall_pre_accept(s, name, anamelen)                      \
-  __sanitizer_syscall_pre_impl_accept((long long)(s), (long long)(name),       \
-                                      (long long)(anamelen))
-#define __sanitizer_syscall_post_accept(res, s, name, anamelen)                \
-  __sanitizer_syscall_post_impl_accept(res, (long long)(s), (long long)(name), \
-                                       (long long)(anamelen))
-#define __sanitizer_syscall_pre_getpeername(fdes, asa, alen)                   \
-  __sanitizer_syscall_pre_impl_getpeername(                                    \
-      (long long)(fdes), (long long)(asa), (long long)(alen))
-#define __sanitizer_syscall_post_getpeername(res, fdes, asa, alen)             \
-  __sanitizer_syscall_post_impl_getpeername(                                   \
-      res, (long long)(fdes), (long long)(asa), (long long)(alen))
-#define __sanitizer_syscall_pre_getsockname(fdes, asa, alen)                   \
-  __sanitizer_syscall_pre_impl_getsockname(                                    \
-      (long long)(fdes), (long long)(asa), (long long)(alen))
-#define __sanitizer_syscall_post_getsockname(res, fdes, asa, alen)             \
-  __sanitizer_syscall_post_impl_getsockname(                                   \
-      res, (long long)(fdes), (long long)(asa), (long long)(alen))
-#define __sanitizer_syscall_pre_access(path, flags)                            \
-  __sanitizer_syscall_pre_impl_access((long long)(path), (long long)(flags))
-#define __sanitizer_syscall_post_access(res, path, flags)                      \
-  __sanitizer_syscall_post_impl_access(res, (long long)(path),                 \
-                                       (long long)(flags))
-#define __sanitizer_syscall_pre_chflags(path, flags)                           \
-  __sanitizer_syscall_pre_impl_chflags((long long)(path), (long long)(flags))
-#define __sanitizer_syscall_post_chflags(res, path, flags)                     \
-  __sanitizer_syscall_post_impl_chflags(res, (long long)(path),                \
-                                        (long long)(flags))
-#define __sanitizer_syscall_pre_fchflags(fd, flags)                            \
-  __sanitizer_syscall_pre_impl_fchflags((long long)(fd), (long long)(flags))
-#define __sanitizer_syscall_post_fchflags(res, fd, flags)                      \
-  __sanitizer_syscall_post_impl_fchflags(res, (long long)(fd),                 \
-                                         (long long)(flags))
-#define __sanitizer_syscall_pre_sync() __sanitizer_syscall_pre_impl_sync()
-#define __sanitizer_syscall_post_sync(res)                                     \
-  __sanitizer_syscall_post_impl_sync(res)
-#define __sanitizer_syscall_pre_kill(pid, signum)                              \
-  __sanitizer_syscall_pre_impl_kill((long long)(pid), (long long)(signum))
-#define __sanitizer_syscall_post_kill(res, pid, signum)                        \
-  __sanitizer_syscall_post_impl_kill(res, (long long)(pid), (long long)(signum))
-#define __sanitizer_syscall_pre_compat_43_stat43(path, ub)                     \
-  __sanitizer_syscall_pre_impl_compat_43_stat43((long long)(path),             \
-                                                (long long)(ub))
-#define __sanitizer_syscall_post_compat_43_stat43(res, path, ub)               \
-  __sanitizer_syscall_post_impl_compat_43_stat43(res, (long long)(path),       \
-                                                 (long long)(ub))
-#define __sanitizer_syscall_pre_getppid() __sanitizer_syscall_pre_impl_getppid()
-#define __sanitizer_syscall_post_getppid(res)                                  \
-  __sanitizer_syscall_post_impl_getppid(res)
-#define __sanitizer_syscall_pre_compat_43_lstat43(path, ub)                    \
-  __sanitizer_syscall_pre_impl_compat_43_lstat43((long long)(path),            \
-                                                 (long long)(ub))
-#define __sanitizer_syscall_post_compat_43_lstat43(res, path, ub)              \
-  __sanitizer_syscall_post_impl_compat_43_lstat43(res, (long long)(path),      \
-                                                  (long long)(ub))
-#define __sanitizer_syscall_pre_dup(fd)                                        \
-  __sanitizer_syscall_pre_impl_dup((long long)(fd))
-#define __sanitizer_syscall_post_dup(res, fd)                                  \
-  __sanitizer_syscall_post_impl_dup(res, (long long)(fd))
-#define __sanitizer_syscall_pre_pipe() __sanitizer_syscall_pre_impl_pipe()
-#define __sanitizer_syscall_post_pipe(res)                                     \
-  __sanitizer_syscall_post_impl_pipe(res)
-#define __sanitizer_syscall_pre_getegid() __sanitizer_syscall_pre_impl_getegid()
-#define __sanitizer_syscall_post_getegid(res)                                  \
-  __sanitizer_syscall_post_impl_getegid(res)
-#define __sanitizer_syscall_pre_profil(samples, size, offset, scale)           \
-  __sanitizer_syscall_pre_impl_profil((long long)(samples), (long long)(size), \
-                                      (long long)(offset), (long long)(scale))
-#define __sanitizer_syscall_post_profil(res, samples, size, offset, scale)     \
-  __sanitizer_syscall_post_impl_profil(res, (long long)(samples),              \
-                                       (long long)(size), (long long)(offset), \
-                                       (long long)(scale))
-#define __sanitizer_syscall_pre_ktrace(fname, ops, facs, pid)                  \
-  __sanitizer_syscall_pre_impl_ktrace((long long)(fname), (long long)(ops),    \
-                                      (long long)(facs), (long long)(pid))
-#define __sanitizer_syscall_post_ktrace(res, fname, ops, facs, pid)            \
-  __sanitizer_syscall_post_impl_ktrace(res, (long long)(fname),                \
-                                       (long long)(ops), (long long)(facs),    \
-                                       (long long)(pid))
-#define __sanitizer_syscall_pre_compat_13_sigaction13(signum, nsa, osa)        \
-  __sanitizer_syscall_pre_impl_compat_13_sigaction13(                          \
-      (long long)(signum), (long long)(nsa), (long long)(osa))
-#define __sanitizer_syscall_post_compat_13_sigaction13(res, signum, nsa, osa)  \
-  __sanitizer_syscall_post_impl_compat_13_sigaction13(                         \
-      res, (long long)(signum), (long long)(nsa), (long long)(osa))
-#define __sanitizer_syscall_pre_getgid() __sanitizer_syscall_pre_impl_getgid()
-#define __sanitizer_syscall_post_getgid(res)                                   \
-  __sanitizer_syscall_post_impl_getgid(res)
-#define __sanitizer_syscall_pre_compat_13_sigprocmask13(how, mask)             \
-  __sanitizer_syscall_pre_impl_compat_13_sigprocmask13((long long)(how),       \
-                                                       (long long)(mask))
-#define __sanitizer_syscall_post_compat_13_sigprocmask13(res, how, mask)       \
-  __sanitizer_syscall_post_impl_compat_13_sigprocmask13(res, (long long)(how), \
-                                                        (long long)(mask))
-#define __sanitizer_syscall_pre___getlogin(namebuf, namelen)                   \
-  __sanitizer_syscall_pre_impl___getlogin((long long)(namebuf),                \
-                                          (long long)(namelen))
-#define __sanitizer_syscall_post___getlogin(res, namebuf, namelen)             \
-  __sanitizer_syscall_post_impl___getlogin(res, (long long)(namebuf),          \
-                                           (long long)(namelen))
-#define __sanitizer_syscall_pre___setlogin(namebuf)                            \
-  __sanitizer_syscall_pre_impl___setlogin((long long)(namebuf))
-#define __sanitizer_syscall_post___setlogin(res, namebuf)                      \
-  __sanitizer_syscall_post_impl___setlogin(res, (long long)(namebuf))
-#define __sanitizer_syscall_pre_acct(path)                                     \
-  __sanitizer_syscall_pre_impl_acct((long long)(path))
-#define __sanitizer_syscall_post_acct(res, path)                               \
-  __sanitizer_syscall_post_impl_acct(res, (long long)(path))
-#define __sanitizer_syscall_pre_compat_13_sigpending13()                       \
-  __sanitizer_syscall_pre_impl_compat_13_sigpending13()
-#define __sanitizer_syscall_post_compat_13_sigpending13(res)                   \
-  __sanitizer_syscall_post_impl_compat_13_sigpending13(res)
-#define __sanitizer_syscall_pre_compat_13_sigaltstack13(nss, oss)              \
-  __sanitizer_syscall_pre_impl_compat_13_sigaltstack13((long long)(nss),       \
-                                                       (long long)(oss))
-#define __sanitizer_syscall_post_compat_13_sigaltstack13(res, nss, oss)        \
-  __sanitizer_syscall_post_impl_compat_13_sigaltstack13(res, (long long)(nss), \
-                                                        (long long)(oss))
-#define __sanitizer_syscall_pre_ioctl(fd, com, data)                           \
-  __sanitizer_syscall_pre_impl_ioctl((long long)(fd), (long long)(com),        \
-                                     (long long)(data))
-#define __sanitizer_syscall_post_ioctl(res, fd, com, data)                     \
-  __sanitizer_syscall_post_impl_ioctl(res, (long long)(fd), (long long)(com),  \
-                                      (long long)(data))
-#define __sanitizer_syscall_pre_compat_12_oreboot(opt)                         \
-  __sanitizer_syscall_pre_impl_compat_12_oreboot((long long)(opt))
-#define __sanitizer_syscall_post_compat_12_oreboot(res, opt)                   \
-  __sanitizer_syscall_post_impl_compat_12_oreboot(res, (long long)(opt))
-#define __sanitizer_syscall_pre_revoke(path)                                   \
-  __sanitizer_syscall_pre_impl_revoke((long long)(path))
-#define __sanitizer_syscall_post_revoke(res, path)                             \
-  __sanitizer_syscall_post_impl_revoke(res, (long long)(path))
-#define __sanitizer_syscall_pre_symlink(path, link)                            \
-  __sanitizer_syscall_pre_impl_symlink((long long)(path), (long long)(link))
-#define __sanitizer_syscall_post_symlink(res, path, link)                      \
-  __sanitizer_syscall_post_impl_symlink(res, (long long)(path),                \
-                                        (long long)(link))
-#define __sanitizer_syscall_pre_readlink(path, buf, count)                     \
-  __sanitizer_syscall_pre_impl_readlink((long long)(path), (long long)(buf),   \
-                                        (long long)(count))
-#define __sanitizer_syscall_post_readlink(res, path, buf, count)               \
-  __sanitizer_syscall_post_impl_readlink(res, (long long)(path),               \
-                                         (long long)(buf), (long long)(count))
-#define __sanitizer_syscall_pre_execve(path, argp, envp)                       \
-  __sanitizer_syscall_pre_impl_execve((long long)(path), (long long)(argp),    \
-                                      (long long)(envp))
-#define __sanitizer_syscall_post_execve(res, path, argp, envp)                 \
-  __sanitizer_syscall_post_impl_execve(res, (long long)(path),                 \
-                                       (long long)(argp), (long long)(envp))
-#define __sanitizer_syscall_pre_umask(newmask)                                 \
-  __sanitizer_syscall_pre_impl_umask((long long)(newmask))
-#define __sanitizer_syscall_post_umask(res, newmask)                           \
-  __sanitizer_syscall_post_impl_umask(res, (long long)(newmask))
-#define __sanitizer_syscall_pre_chroot(path)                                   \
-  __sanitizer_syscall_pre_impl_chroot((long long)(path))
-#define __sanitizer_syscall_post_chroot(res, path)                             \
-  __sanitizer_syscall_post_impl_chroot(res, (long long)(path))
-#define __sanitizer_syscall_pre_compat_43_fstat43(fd, sb)                      \
-  __sanitizer_syscall_pre_impl_compat_43_fstat43((long long)(fd),              \
-                                                 (long long)(sb))
-#define __sanitizer_syscall_post_compat_43_fstat43(res, fd, sb)                \
-  __sanitizer_syscall_post_impl_compat_43_fstat43(res, (long long)(fd),        \
-                                                  (long long)(sb))
-#define __sanitizer_syscall_pre_compat_43_ogetkerninfo(op, where, size, arg)   \
-  __sanitizer_syscall_pre_impl_compat_43_ogetkerninfo(                         \
-      (long long)(op), (long long)(where), (long long)(size),                  \
-      (long long)(arg))
-#define __sanitizer_syscall_post_compat_43_ogetkerninfo(res, op, where, size,  \
-                                                        arg)                   \
-  __sanitizer_syscall_post_impl_compat_43_ogetkerninfo(                        \
-      res, (long long)(op), (long long)(where), (long long)(size),             \
-      (long long)(arg))
-#define __sanitizer_syscall_pre_compat_43_ogetpagesize()                       \
-  __sanitizer_syscall_pre_impl_compat_43_ogetpagesize()
-#define __sanitizer_syscall_post_compat_43_ogetpagesize(res)                   \
-  __sanitizer_syscall_post_impl_compat_43_ogetpagesize(res)
-#define __sanitizer_syscall_pre_compat_12_msync(addr, len)                     \
-  __sanitizer_syscall_pre_impl_compat_12_msync((long long)(addr),              \
-                                               (long long)(len))
-#define __sanitizer_syscall_post_compat_12_msync(res, addr, len)               \
-  __sanitizer_syscall_post_impl_compat_12_msync(res, (long long)(addr),        \
-                                                (long long)(len))
-#define __sanitizer_syscall_pre_vfork() __sanitizer_syscall_pre_impl_vfork()
-#define __sanitizer_syscall_post_vfork(res)                                    \
-  __sanitizer_syscall_post_impl_vfork(res)
-/* syscall 67 has been skipped */
-/* syscall 68 has been skipped */
-/* syscall 69 has been skipped */
-/* syscall 70 has been skipped */
-#define __sanitizer_syscall_pre_compat_43_ommap(addr, len, prot, flags, fd,    \
-                                                pos)                           \
-  __sanitizer_syscall_pre_impl_compat_43_ommap(                                \
-      (long long)(addr), (long long)(len), (long long)(prot),                  \
-      (long long)(flags), (long long)(fd), (long long)(pos))
-#define __sanitizer_syscall_post_compat_43_ommap(res, addr, len, prot, flags,  \
-                                                 fd, pos)                      \
-  __sanitizer_syscall_post_impl_compat_43_ommap(                               \
-      res, (long long)(addr), (long long)(len), (long long)(prot),             \
-      (long long)(flags), (long long)(fd), (long long)(pos))
-#define __sanitizer_syscall_pre_vadvise(anom)                                  \
-  __sanitizer_syscall_pre_impl_vadvise((long long)(anom))
-#define __sanitizer_syscall_post_vadvise(res, anom)                            \
-  __sanitizer_syscall_post_impl_vadvise(res, (long long)(anom))
-#define __sanitizer_syscall_pre_munmap(addr, len)                              \
-  __sanitizer_syscall_pre_impl_munmap((long long)(addr), (long long)(len))
-#define __sanitizer_syscall_post_munmap(res, addr, len)                        \
-  __sanitizer_syscall_post_impl_munmap(res, (long long)(addr), (long long)(len))
-#define __sanitizer_syscall_pre_mprotect(addr, len, prot)                      \
-  __sanitizer_syscall_pre_impl_mprotect((long long)(addr), (long long)(len),   \
-                                        (long long)(prot))
-#define __sanitizer_syscall_post_mprotect(res, addr, len, prot)                \
-  __sanitizer_syscall_post_impl_mprotect(res, (long long)(addr),               \
-                                         (long long)(len), (long long)(prot))
-#define __sanitizer_syscall_pre_madvise(addr, len, behav)                      \
-  __sanitizer_syscall_pre_impl_madvise((long long)(addr), (long long)(len),    \
-                                       (long long)(behav))
-#define __sanitizer_syscall_post_madvise(res, addr, len, behav)                \
-  __sanitizer_syscall_post_impl_madvise(res, (long long)(addr),                \
-                                        (long long)(len), (long long)(behav))
-/* syscall 76 has been skipped */
-/* syscall 77 has been skipped */
-#define __sanitizer_syscall_pre_mincore(addr, len, vec)                        \
-  __sanitizer_syscall_pre_impl_mincore((long long)(addr), (long long)(len),    \
-                                       (long long)(vec))
-#define __sanitizer_syscall_post_mincore(res, addr, len, vec)                  \
-  __sanitizer_syscall_post_impl_mincore(res, (long long)(addr),                \
-                                        (long long)(len), (long long)(vec))
-#define __sanitizer_syscall_pre_getgroups(gidsetsize, gidset)                  \
-  __sanitizer_syscall_pre_impl_getgroups((long long)(gidsetsize),              \
-                                         (long long)(gidset))
-#define __sanitizer_syscall_post_getgroups(res, gidsetsize, gidset)            \
-  __sanitizer_syscall_post_impl_getgroups(res, (long long)(gidsetsize),        \
-                                          (long long)(gidset))
-#define __sanitizer_syscall_pre_setgroups(gidsetsize, gidset)                  \
-  __sanitizer_syscall_pre_impl_setgroups((long long)(gidsetsize),              \
-                                         (long long)(gidset))
-#define __sanitizer_syscall_post_setgroups(res, gidsetsize, gidset)            \
-  __sanitizer_syscall_post_impl_setgroups(res, (long long)(gidsetsize),        \
-                                          (long long)(gidset))
-#define __sanitizer_syscall_pre_getpgrp() __sanitizer_syscall_pre_impl_getpgrp()
-#define __sanitizer_syscall_post_getpgrp(res)                                  \
-  __sanitizer_syscall_post_impl_getpgrp(res)
-#define __sanitizer_syscall_pre_setpgid(pid, pgid)                             \
-  __sanitizer_syscall_pre_impl_setpgid((long long)(pid), (long long)(pgid))
-#define __sanitizer_syscall_post_setpgid(res, pid, pgid)                       \
-  __sanitizer_syscall_post_impl_setpgid(res, (long long)(pid),                 \
-                                        (long long)(pgid))
-#define __sanitizer_syscall_pre_compat_50_setitimer(which, itv, oitv)          \
-  __sanitizer_syscall_pre_impl_compat_50_setitimer(                            \
-      (long long)(which), (long long)(itv), (long long)(oitv))
-#define __sanitizer_syscall_post_compat_50_setitimer(res, which, itv, oitv)    \
-  __sanitizer_syscall_post_impl_compat_50_setitimer(                           \
-      res, (long long)(which), (long long)(itv), (long long)(oitv))
-#define __sanitizer_syscall_pre_compat_43_owait()                              \
-  __sanitizer_syscall_pre_impl_compat_43_owait()
-#define __sanitizer_syscall_post_compat_43_owait(res)                          \
-  __sanitizer_syscall_post_impl_compat_43_owait(res)
-#define __sanitizer_syscall_pre_compat_12_oswapon(name)                        \
-  __sanitizer_syscall_pre_impl_compat_12_oswapon((long long)(name))
-#define __sanitizer_syscall_post_compat_12_oswapon(res, name)                  \
-  __sanitizer_syscall_post_impl_compat_12_oswapon(res, (long long)(name))
-#define __sanitizer_syscall_pre_compat_50_getitimer(which, itv)                \
-  __sanitizer_syscall_pre_impl_compat_50_getitimer((long long)(which),         \
-                                                   (long long)(itv))
-#define __sanitizer_syscall_post_compat_50_getitimer(res, which, itv)          \
-  __sanitizer_syscall_post_impl_compat_50_getitimer(res, (long long)(which),   \
-                                                    (long long)(itv))
-#define __sanitizer_syscall_pre_compat_43_ogethostname(hostname, len)          \
-  __sanitizer_syscall_pre_impl_compat_43_ogethostname((long long)(hostname),   \
-                                                      (long long)(len))
-#define __sanitizer_syscall_post_compat_43_ogethostname(res, hostname, len)    \
-  __sanitizer_syscall_post_impl_compat_43_ogethostname(                        \
-      res, (long long)(hostname), (long long)(len))
-#define __sanitizer_syscall_pre_compat_43_osethostname(hostname, len)          \
-  __sanitizer_syscall_pre_impl_compat_43_osethostname((long long)(hostname),   \
-                                                      (long long)(len))
-#define __sanitizer_syscall_post_compat_43_osethostname(res, hostname, len)    \
-  __sanitizer_syscall_post_impl_compat_43_osethostname(                        \
-      res, (long long)(hostname), (long long)(len))
-#define __sanitizer_syscall_pre_compat_43_ogetdtablesize()                     \
-  __sanitizer_syscall_pre_impl_compat_43_ogetdtablesize()
-#define __sanitizer_syscall_post_compat_43_ogetdtablesize(res)                 \
-  __sanitizer_syscall_post_impl_compat_43_ogetdtablesize(res)
-#define __sanitizer_syscall_pre_dup2(from, to)                                 \
-  __sanitizer_syscall_pre_impl_dup2((long long)(from), (long long)(to))
-#define __sanitizer_syscall_post_dup2(res, from, to)                           \
-  __sanitizer_syscall_post_impl_dup2(res, (long long)(from), (long long)(to))
-/* syscall 91 has been skipped */
-#define __sanitizer_syscall_pre_fcntl(fd, cmd, arg)                            \
-  __sanitizer_syscall_pre_impl_fcntl((long long)(fd), (long long)(cmd),        \
-                                     (long long)(arg))
-#define __sanitizer_syscall_post_fcntl(res, fd, cmd, arg)                      \
-  __sanitizer_syscall_post_impl_fcntl(res, (long long)(fd), (long long)(cmd),  \
-                                      (long long)(arg))
-#define __sanitizer_syscall_pre_compat_50_select(nd, in, ou, ex, tv)           \
-  __sanitizer_syscall_pre_impl_compat_50_select(                               \
-      (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex),      \
-      (long long)(tv))
-#define __sanitizer_syscall_post_compat_50_select(res, nd, in, ou, ex, tv)     \
-  __sanitizer_syscall_post_impl_compat_50_select(                              \
-      res, (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \
-      (long long)(tv))
-/* syscall 94 has been skipped */
-#define __sanitizer_syscall_pre_fsync(fd)                                      \
-  __sanitizer_syscall_pre_impl_fsync((long long)(fd))
-#define __sanitizer_syscall_post_fsync(res, fd)                                \
-  __sanitizer_syscall_post_impl_fsync(res, (long long)(fd))
-#define __sanitizer_syscall_pre_setpriority(which, who, prio)                  \
-  __sanitizer_syscall_pre_impl_setpriority(                                    \
-      (long long)(which), (long long)(who), (long long)(prio))
-#define __sanitizer_syscall_post_setpriority(res, which, who, prio)            \
-  __sanitizer_syscall_post_impl_setpriority(                                   \
-      res, (long long)(which), (long long)(who), (long long)(prio))
-#define __sanitizer_syscall_pre_compat_30_socket(domain, type, protocol)       \
-  __sanitizer_syscall_pre_impl_compat_30_socket(                               \
-      (long long)(domain), (long long)(type), (long long)(protocol))
-#define __sanitizer_syscall_post_compat_30_socket(res, domain, type, protocol) \
-  __sanitizer_syscall_post_impl_compat_30_socket(                              \
-      res, (long long)(domain), (long long)(type), (long long)(protocol))
-#define __sanitizer_syscall_pre_connect(s, name, namelen)                      \
-  __sanitizer_syscall_pre_impl_connect((long long)(s), (long long)(name),      \
-                                       (long long)(namelen))
-#define __sanitizer_syscall_post_connect(res, s, name, namelen)                \
-  __sanitizer_syscall_post_impl_connect(                                       \
-      res, (long long)(s), (long long)(name), (long long)(namelen))
-#define __sanitizer_syscall_pre_compat_43_oaccept(s, name, anamelen)           \
-  __sanitizer_syscall_pre_impl_compat_43_oaccept(                              \
-      (long long)(s), (long long)(name), (long long)(anamelen))
-#define __sanitizer_syscall_post_compat_43_oaccept(res, s, name, anamelen)     \
-  __sanitizer_syscall_post_impl_compat_43_oaccept(                             \
-      res, (long long)(s), (long long)(name), (long long)(anamelen))
-#define __sanitizer_syscall_pre_getpriority(which, who)                        \
-  __sanitizer_syscall_pre_impl_getpriority((long long)(which), (long long)(who))
-#define __sanitizer_syscall_post_getpriority(res, which, who)                  \
-  __sanitizer_syscall_post_impl_getpriority(res, (long long)(which),           \
-                                            (long long)(who))
-#define __sanitizer_syscall_pre_compat_43_osend(s, buf, len, flags)            \
-  __sanitizer_syscall_pre_impl_compat_43_osend(                                \
-      (long long)(s), (long long)(buf), (long long)(len), (long long)(flags))
-#define __sanitizer_syscall_post_compat_43_osend(res, s, buf, len, flags)      \
-  __sanitizer_syscall_post_impl_compat_43_osend(                               \
-      res, (long long)(s), (long long)(buf), (long long)(len),                 \
-      (long long)(flags))
-#define __sanitizer_syscall_pre_compat_43_orecv(s, buf, len, flags)            \
-  __sanitizer_syscall_pre_impl_compat_43_orecv(                                \
-      (long long)(s), (long long)(buf), (long long)(len), (long long)(flags))
-#define __sanitizer_syscall_post_compat_43_orecv(res, s, buf, len, flags)      \
-  __sanitizer_syscall_post_impl_compat_43_orecv(                               \
-      res, (long long)(s), (long long)(buf), (long long)(len),                 \
-      (long long)(flags))
-#define __sanitizer_syscall_pre_compat_13_sigreturn13(sigcntxp)                \
-  __sanitizer_syscall_pre_impl_compat_13_sigreturn13((long long)(sigcntxp))
-#define __sanitizer_syscall_post_compat_13_sigreturn13(res, sigcntxp)          \
-  __sanitizer_syscall_post_impl_compat_13_sigreturn13(res,                     \
-                                                      (long long)(sigcntxp))
-#define __sanitizer_syscall_pre_bind(s, name, namelen)                         \
-  __sanitizer_syscall_pre_impl_bind((long long)(s), (long long)(name),         \
-                                    (long long)(namelen))
-#define __sanitizer_syscall_post_bind(res, s, name, namelen)                   \
-  __sanitizer_syscall_post_impl_bind(res, (long long)(s), (long long)(name),   \
-                                     (long long)(namelen))
-#define __sanitizer_syscall_pre_setsockopt(s, level, name, val, valsize)       \
-  __sanitizer_syscall_pre_impl_setsockopt((long long)(s), (long long)(level),  \
-                                          (long long)(name), (long long)(val), \
-                                          (long long)(valsize))
-#define __sanitizer_syscall_post_setsockopt(res, s, level, name, val, valsize) \
-  __sanitizer_syscall_post_impl_setsockopt(                                    \
-      res, (long long)(s), (long long)(level), (long long)(name),              \
-      (long long)(val), (long long)(valsize))
-#define __sanitizer_syscall_pre_listen(s, backlog)                             \
-  __sanitizer_syscall_pre_impl_listen((long long)(s), (long long)(backlog))
-#define __sanitizer_syscall_post_listen(res, s, backlog)                       \
-  __sanitizer_syscall_post_impl_listen(res, (long long)(s),                    \
-                                       (long long)(backlog))
-/* syscall 107 has been skipped */
-#define __sanitizer_syscall_pre_compat_43_osigvec(signum, nsv, osv)            \
-  __sanitizer_syscall_pre_impl_compat_43_osigvec(                              \
-      (long long)(signum), (long long)(nsv), (long long)(osv))
-#define __sanitizer_syscall_post_compat_43_osigvec(res, signum, nsv, osv)      \
-  __sanitizer_syscall_post_impl_compat_43_osigvec(                             \
-      res, (long long)(signum), (long long)(nsv), (long long)(osv))
-#define __sanitizer_syscall_pre_compat_43_osigblock(mask)                      \
-  __sanitizer_syscall_pre_impl_compat_43_osigblock((long long)(mask))
-#define __sanitizer_syscall_post_compat_43_osigblock(res, mask)                \
-  __sanitizer_syscall_post_impl_compat_43_osigblock(res, (long long)(mask))
-#define __sanitizer_syscall_pre_compat_43_osigsetmask(mask)                    \
-  __sanitizer_syscall_pre_impl_compat_43_osigsetmask((long long)(mask))
-#define __sanitizer_syscall_post_compat_43_osigsetmask(res, mask)              \
-  __sanitizer_syscall_post_impl_compat_43_osigsetmask(res, (long long)(mask))
-#define __sanitizer_syscall_pre_compat_13_sigsuspend13(mask)                   \
-  __sanitizer_syscall_pre_impl_compat_13_sigsuspend13((long long)(mask))
-#define __sanitizer_syscall_post_compat_13_sigsuspend13(res, mask)             \
-  __sanitizer_syscall_post_impl_compat_13_sigsuspend13(res, (long long)(mask))
-#define __sanitizer_syscall_pre_compat_43_osigstack(nss, oss)                  \
-  __sanitizer_syscall_pre_impl_compat_43_osigstack((long long)(nss),           \
-                                                   (long long)(oss))
-#define __sanitizer_syscall_post_compat_43_osigstack(res, nss, oss)            \
-  __sanitizer_syscall_post_impl_compat_43_osigstack(res, (long long)(nss),     \
-                                                    (long long)(oss))
-#define __sanitizer_syscall_pre_compat_43_orecvmsg(s, msg, flags)              \
-  __sanitizer_syscall_pre_impl_compat_43_orecvmsg(                             \
-      (long long)(s), (long long)(msg), (long long)(flags))
-#define __sanitizer_syscall_post_compat_43_orecvmsg(res, s, msg, flags)        \
-  __sanitizer_syscall_post_impl_compat_43_orecvmsg(                            \
-      res, (long long)(s), (long long)(msg), (long long)(flags))
-#define __sanitizer_syscall_pre_compat_43_osendmsg(s, msg, flags)              \
-  __sanitizer_syscall_pre_impl_compat_43_osendmsg(                             \
-      (long long)(s), (long long)(msg), (long long)(flags))
-#define __sanitizer_syscall_post_compat_43_osendmsg(res, s, msg, flags)        \
-  __sanitizer_syscall_post_impl_compat_43_osendmsg(                            \
-      res, (long long)(s), (long long)(msg), (long long)(flags))
-/* syscall 115 has been skipped */
-#define __sanitizer_syscall_pre_compat_50_gettimeofday(tp, tzp)                \
-  __sanitizer_syscall_pre_impl_compat_50_gettimeofday((long long)(tp),         \
-                                                      (long long)(tzp))
-#define __sanitizer_syscall_post_compat_50_gettimeofday(res, tp, tzp)          \
-  __sanitizer_syscall_post_impl_compat_50_gettimeofday(res, (long long)(tp),   \
-                                                       (long long)(tzp))
-#define __sanitizer_syscall_pre_compat_50_getrusage(who, rusage)               \
-  __sanitizer_syscall_pre_impl_compat_50_getrusage((long long)(who),           \
-                                                   (long long)(rusage))
-#define __sanitizer_syscall_post_compat_50_getrusage(res, who, rusage)         \
-  __sanitizer_syscall_post_impl_compat_50_getrusage(res, (long long)(who),     \
-                                                    (long long)(rusage))
-#define __sanitizer_syscall_pre_getsockopt(s, level, name, val, avalsize)      \
-  __sanitizer_syscall_pre_impl_getsockopt((long long)(s), (long long)(level),  \
-                                          (long long)(name), (long long)(val), \
-                                          (long long)(avalsize))
-#define __sanitizer_syscall_post_getsockopt(res, s, level, name, val,          \
-                                            avalsize)                          \
-  __sanitizer_syscall_post_impl_getsockopt(                                    \
-      res, (long long)(s), (long long)(level), (long long)(name),              \
-      (long long)(val), (long long)(avalsize))
-/* syscall 119 has been skipped */
-#define __sanitizer_syscall_pre_readv(fd, iovp, iovcnt)                        \
-  __sanitizer_syscall_pre_impl_readv((long long)(fd), (long long)(iovp),       \
-                                     (long long)(iovcnt))
-#define __sanitizer_syscall_post_readv(res, fd, iovp, iovcnt)                  \
-  __sanitizer_syscall_post_impl_readv(res, (long long)(fd), (long long)(iovp), \
-                                      (long long)(iovcnt))
-#define __sanitizer_syscall_pre_writev(fd, iovp, iovcnt)                       \
-  __sanitizer_syscall_pre_impl_writev((long long)(fd), (long long)(iovp),      \
-                                      (long long)(iovcnt))
-#define __sanitizer_syscall_post_writev(res, fd, iovp, iovcnt)                 \
-  __sanitizer_syscall_post_impl_writev(res, (long long)(fd),                   \
-                                       (long long)(iovp), (long long)(iovcnt))
-#define __sanitizer_syscall_pre_compat_50_settimeofday(tv, tzp)                \
-  __sanitizer_syscall_pre_impl_compat_50_settimeofday((long long)(tv),         \
-                                                      (long long)(tzp))
-#define __sanitizer_syscall_post_compat_50_settimeofday(res, tv, tzp)          \
-  __sanitizer_syscall_post_impl_compat_50_settimeofday(res, (long long)(tv),   \
-                                                       (long long)(tzp))
-#define __sanitizer_syscall_pre_fchown(fd, uid, gid)                           \
-  __sanitizer_syscall_pre_impl_fchown((long long)(fd), (long long)(uid),       \
-                                      (long long)(gid))
-#define __sanitizer_syscall_post_fchown(res, fd, uid, gid)                     \
-  __sanitizer_syscall_post_impl_fchown(res, (long long)(fd), (long long)(uid), \
-                                       (long long)(gid))
-#define __sanitizer_syscall_pre_fchmod(fd, mode)                               \
-  __sanitizer_syscall_pre_impl_fchmod((long long)(fd), (long long)(mode))
-#define __sanitizer_syscall_post_fchmod(res, fd, mode)                         \
-  __sanitizer_syscall_post_impl_fchmod(res, (long long)(fd), (long long)(mode))
-#define __sanitizer_syscall_pre_compat_43_orecvfrom(s, buf, len, flags, from,  \
-                                                    fromlenaddr)               \
-  __sanitizer_syscall_pre_impl_compat_43_orecvfrom(                            \
-      (long long)(s), (long long)(buf), (long long)(len), (long long)(flags),  \
-      (long long)(from), (long long)(fromlenaddr))
-#define __sanitizer_syscall_post_compat_43_orecvfrom(res, s, buf, len, flags,  \
-                                                     from, fromlenaddr)        \
-  __sanitizer_syscall_post_impl_compat_43_orecvfrom(                           \
-      res, (long long)(s), (long long)(buf), (long long)(len),                 \
-      (long long)(flags), (long long)(from), (long long)(fromlenaddr))
-#define __sanitizer_syscall_pre_setreuid(ruid, euid)                           \
-  __sanitizer_syscall_pre_impl_setreuid((long long)(ruid), (long long)(euid))
-#define __sanitizer_syscall_post_setreuid(res, ruid, euid)                     \
-  __sanitizer_syscall_post_impl_setreuid(res, (long long)(ruid),               \
-                                         (long long)(euid))
-#define __sanitizer_syscall_pre_setregid(rgid, egid)                           \
-  __sanitizer_syscall_pre_impl_setregid((long long)(rgid), (long long)(egid))
-#define __sanitizer_syscall_post_setregid(res, rgid, egid)                     \
-  __sanitizer_syscall_post_impl_setregid(res, (long long)(rgid),               \
-                                         (long long)(egid))
-#define __sanitizer_syscall_pre_rename(from, to)                               \
-  __sanitizer_syscall_pre_impl_rename((long long)(from), (long long)(to))
-#define __sanitizer_syscall_post_rename(res, from, to)                         \
-  __sanitizer_syscall_post_impl_rename(res, (long long)(from), (long long)(to))
-#define __sanitizer_syscall_pre_compat_43_otruncate(path, length)              \
-  __sanitizer_syscall_pre_impl_compat_43_otruncate((long long)(path),          \
-                                                   (long long)(length))
-#define __sanitizer_syscall_post_compat_43_otruncate(res, path, length)        \
-  __sanitizer_syscall_post_impl_compat_43_otruncate(res, (long long)(path),    \
-                                                    (long long)(length))
-#define __sanitizer_syscall_pre_compat_43_oftruncate(fd, length)               \
-  __sanitizer_syscall_pre_impl_compat_43_oftruncate((long long)(fd),           \
-                                                    (long long)(length))
-#define __sanitizer_syscall_post_compat_43_oftruncate(res, fd, length)         \
-  __sanitizer_syscall_post_impl_compat_43_oftruncate(res, (long long)(fd),     \
-                                                     (long long)(length))
-#define __sanitizer_syscall_pre_flock(fd, how)                                 \
-  __sanitizer_syscall_pre_impl_flock((long long)(fd), (long long)(how))
-#define __sanitizer_syscall_post_flock(res, fd, how)                           \
-  __sanitizer_syscall_post_impl_flock(res, (long long)(fd), (long long)(how))
-#define __sanitizer_syscall_pre_mkfifo(path, mode)                             \
-  __sanitizer_syscall_pre_impl_mkfifo((long long)(path), (long long)(mode))
-#define __sanitizer_syscall_post_mkfifo(res, path, mode)                       \
-  __sanitizer_syscall_post_impl_mkfifo(res, (long long)(path),                 \
-                                       (long long)(mode))
-#define __sanitizer_syscall_pre_sendto(s, buf, len, flags, to, tolen)          \
-  __sanitizer_syscall_pre_impl_sendto((long long)(s), (long long)(buf),        \
-                                      (long long)(len), (long long)(flags),    \
-                                      (long long)(to), (long long)(tolen))
-#define __sanitizer_syscall_post_sendto(res, s, buf, len, flags, to, tolen)    \
-  __sanitizer_syscall_post_impl_sendto(res, (long long)(s), (long long)(buf),  \
-                                       (long long)(len), (long long)(flags),   \
-                                       (long long)(to), (long long)(tolen))
-#define __sanitizer_syscall_pre_shutdown(s, how)                               \
-  __sanitizer_syscall_pre_impl_shutdown((long long)(s), (long long)(how))
-#define __sanitizer_syscall_post_shutdown(res, s, how)                         \
-  __sanitizer_syscall_post_impl_shutdown(res, (long long)(s), (long long)(how))
-#define __sanitizer_syscall_pre_socketpair(domain, type, protocol, rsv)        \
-  __sanitizer_syscall_pre_impl_socketpair(                                     \
-      (long long)(domain), (long long)(type), (long long)(protocol),           \
-      (long long)(rsv))
-#define __sanitizer_syscall_post_socketpair(res, domain, type, protocol, rsv)  \
-  __sanitizer_syscall_post_impl_socketpair(                                    \
-      res, (long long)(domain), (long long)(type), (long long)(protocol),      \
-      (long long)(rsv))
-#define __sanitizer_syscall_pre_mkdir(path, mode)                              \
-  __sanitizer_syscall_pre_impl_mkdir((long long)(path), (long long)(mode))
-#define __sanitizer_syscall_post_mkdir(res, path, mode)                        \
-  __sanitizer_syscall_post_impl_mkdir(res, (long long)(path), (long long)(mode))
-#define __sanitizer_syscall_pre_rmdir(path)                                    \
-  __sanitizer_syscall_pre_impl_rmdir((long long)(path))
-#define __sanitizer_syscall_post_rmdir(res, path)                              \
-  __sanitizer_syscall_post_impl_rmdir(res, (long long)(path))
-#define __sanitizer_syscall_pre_compat_50_utimes(path, tptr)                   \
-  __sanitizer_syscall_pre_impl_compat_50_utimes((long long)(path),             \
-                                                (long long)(tptr))
-#define __sanitizer_syscall_post_compat_50_utimes(res, path, tptr)             \
-  __sanitizer_syscall_post_impl_compat_50_utimes(res, (long long)(path),       \
-                                                 (long long)(tptr))
-/* syscall 139 has been skipped */
-#define __sanitizer_syscall_pre_compat_50_adjtime(delta, olddelta)             \
-  __sanitizer_syscall_pre_impl_compat_50_adjtime((long long)(delta),           \
-                                                 (long long)(olddelta))
-#define __sanitizer_syscall_post_compat_50_adjtime(res, delta, olddelta)       \
-  __sanitizer_syscall_post_impl_compat_50_adjtime(res, (long long)(delta),     \
-                                                  (long long)(olddelta))
-#define __sanitizer_syscall_pre_compat_43_ogetpeername(fdes, asa, alen)        \
-  __sanitizer_syscall_pre_impl_compat_43_ogetpeername(                         \
-      (long long)(fdes), (long long)(asa), (long long)(alen))
-#define __sanitizer_syscall_post_compat_43_ogetpeername(res, fdes, asa, alen)  \
-  __sanitizer_syscall_post_impl_compat_43_ogetpeername(                        \
-      res, (long long)(fdes), (long long)(asa), (long long)(alen))
-#define __sanitizer_syscall_pre_compat_43_ogethostid()                         \
-  __sanitizer_syscall_pre_impl_compat_43_ogethostid()
-#define __sanitizer_syscall_post_compat_43_ogethostid(res)                     \
-  __sanitizer_syscall_post_impl_compat_43_ogethostid(res)
-#define __sanitizer_syscall_pre_compat_43_osethostid(hostid)                   \
-  __sanitizer_syscall_pre_impl_compat_43_osethostid((long long)(hostid))
-#define __sanitizer_syscall_post_compat_43_osethostid(res, hostid)             \
-  __sanitizer_syscall_post_impl_compat_43_osethostid(res, (long long)(hostid))
-#define __sanitizer_syscall_pre_compat_43_ogetrlimit(which, rlp)               \
-  __sanitizer_syscall_pre_impl_compat_43_ogetrlimit((long long)(which),        \
-                                                    (long long)(rlp))
-#define __sanitizer_syscall_post_compat_43_ogetrlimit(res, which, rlp)         \
-  __sanitizer_syscall_post_impl_compat_43_ogetrlimit(res, (long long)(which),  \
-                                                     (long long)(rlp))
-#define __sanitizer_syscall_pre_compat_43_osetrlimit(which, rlp)               \
-  __sanitizer_syscall_pre_impl_compat_43_osetrlimit((long long)(which),        \
-                                                    (long long)(rlp))
-#define __sanitizer_syscall_post_compat_43_osetrlimit(res, which, rlp)         \
-  __sanitizer_syscall_post_impl_compat_43_osetrlimit(res, (long long)(which),  \
-                                                     (long long)(rlp))
-#define __sanitizer_syscall_pre_compat_43_okillpg(pgid, signum)                \
-  __sanitizer_syscall_pre_impl_compat_43_okillpg((long long)(pgid),            \
-                                                 (long long)(signum))
-#define __sanitizer_syscall_post_compat_43_okillpg(res, pgid, signum)          \
-  __sanitizer_syscall_post_impl_compat_43_okillpg(res, (long long)(pgid),      \
-                                                  (long long)(signum))
-#define __sanitizer_syscall_pre_setsid() __sanitizer_syscall_pre_impl_setsid()
-#define __sanitizer_syscall_post_setsid(res)                                   \
-  __sanitizer_syscall_post_impl_setsid(res)
-#define __sanitizer_syscall_pre_compat_50_quotactl(path, cmd, uid, arg)        \
-  __sanitizer_syscall_pre_impl_compat_50_quotactl(                             \
-      (long long)(path), (long long)(cmd), (long long)(uid), (long long)(arg))
-#define __sanitizer_syscall_post_compat_50_quotactl(res, path, cmd, uid, arg)  \
-  __sanitizer_syscall_post_impl_compat_50_quotactl(                            \
-      res, (long long)(path), (long long)(cmd), (long long)(uid),              \
-      (long long)(arg))
-#define __sanitizer_syscall_pre_compat_43_oquota()                             \
-  __sanitizer_syscall_pre_impl_compat_43_oquota()
-#define __sanitizer_syscall_post_compat_43_oquota(res)                         \
-  __sanitizer_syscall_post_impl_compat_43_oquota(res)
-#define __sanitizer_syscall_pre_compat_43_ogetsockname(fdec, asa, alen)        \
-  __sanitizer_syscall_pre_impl_compat_43_ogetsockname(                         \
-      (long long)(fdec), (long long)(asa), (long long)(alen))
-#define __sanitizer_syscall_post_compat_43_ogetsockname(res, fdec, asa, alen)  \
-  __sanitizer_syscall_post_impl_compat_43_ogetsockname(                        \
-      res, (long long)(fdec), (long long)(asa), (long long)(alen))
-/* syscall 151 has been skipped */
-/* syscall 152 has been skipped */
-/* syscall 153 has been skipped */
-/* syscall 154 has been skipped */
-#define __sanitizer_syscall_pre_nfssvc(flag, argp)                             \
-  __sanitizer_syscall_pre_impl_nfssvc((long long)(flag), (long long)(argp))
-#define __sanitizer_syscall_post_nfssvc(res, flag, argp)                       \
-  __sanitizer_syscall_post_impl_nfssvc(res, (long long)(flag),                 \
-                                       (long long)(argp))
-#define __sanitizer_syscall_pre_compat_43_ogetdirentries(fd, buf, count,       \
-                                                         basep)                \
-  __sanitizer_syscall_pre_impl_compat_43_ogetdirentries(                       \
-      (long long)(fd), (long long)(buf), (long long)(count),                   \
-      (long long)(basep))
-#define __sanitizer_syscall_post_compat_43_ogetdirentries(res, fd, buf, count, \
-                                                          basep)               \
-  __sanitizer_syscall_post_impl_compat_43_ogetdirentries(                      \
-      res, (long long)(fd), (long long)(buf), (long long)(count),              \
-      (long long)(basep))
-#define __sanitizer_syscall_pre_compat_20_statfs(path, buf)                    \
-  __sanitizer_syscall_pre_impl_compat_20_statfs((long long)(path),             \
-                                                (long long)(buf))
-#define __sanitizer_syscall_post_compat_20_statfs(res, path, buf)              \
-  __sanitizer_syscall_post_impl_compat_20_statfs(res, (long long)(path),       \
-                                                 (long long)(buf))
-#define __sanitizer_syscall_pre_compat_20_fstatfs(fd, buf)                     \
-  __sanitizer_syscall_pre_impl_compat_20_fstatfs((long long)(fd),              \
-                                                 (long long)(buf))
-#define __sanitizer_syscall_post_compat_20_fstatfs(res, fd, buf)               \
-  __sanitizer_syscall_post_impl_compat_20_fstatfs(res, (long long)(fd),        \
-                                                  (long long)(buf))
-/* syscall 159 has been skipped */
-/* syscall 160 has been skipped */
-#define __sanitizer_syscall_pre_compat_30_getfh(fname, fhp)                    \
-  __sanitizer_syscall_pre_impl_compat_30_getfh((long long)(fname),             \
-                                               (long long)(fhp))
-#define __sanitizer_syscall_post_compat_30_getfh(res, fname, fhp)              \
-  __sanitizer_syscall_post_impl_compat_30_getfh(res, (long long)(fname),       \
-                                                (long long)(fhp))
-#define __sanitizer_syscall_pre_compat_09_ogetdomainname(domainname, len)      \
-  __sanitizer_syscall_pre_impl_compat_09_ogetdomainname(                       \
-      (long long)(domainname), (long long)(len))
-#define __sanitizer_syscall_post_compat_09_ogetdomainname(res, domainname,     \
-                                                          len)                 \
-  __sanitizer_syscall_post_impl_compat_09_ogetdomainname(                      \
-      res, (long long)(domainname), (long long)(len))
-#define __sanitizer_syscall_pre_compat_09_osetdomainname(domainname, len)      \
-  __sanitizer_syscall_pre_impl_compat_09_osetdomainname(                       \
-      (long long)(domainname), (long long)(len))
-#define __sanitizer_syscall_post_compat_09_osetdomainname(res, domainname,     \
-                                                          len)                 \
-  __sanitizer_syscall_post_impl_compat_09_osetdomainname(                      \
-      res, (long long)(domainname), (long long)(len))
-#define __sanitizer_syscall_pre_compat_09_ouname(name)                         \
-  __sanitizer_syscall_pre_impl_compat_09_ouname((long long)(name))
-#define __sanitizer_syscall_post_compat_09_ouname(res, name)                   \
-  __sanitizer_syscall_post_impl_compat_09_ouname(res, (long long)(name))
-#define __sanitizer_syscall_pre_sysarch(op, parms)                             \
-  __sanitizer_syscall_pre_impl_sysarch((long long)(op), (long long)(parms))
-#define __sanitizer_syscall_post_sysarch(res, op, parms)                       \
-  __sanitizer_syscall_post_impl_sysarch(res, (long long)(op),                  \
-                                        (long long)(parms))
-/* syscall 166 has been skipped */
-/* syscall 167 has been skipped */
-/* syscall 168 has been skipped */
-#if !defined(_LP64)
-#define __sanitizer_syscall_pre_compat_10_osemsys(which, a2, a3, a4, a5)       \
-  __sanitizer_syscall_pre_impl_compat_10_osemsys(                              \
-      (long long)(which), (long long)(a2), (long long)(a3), (long long)(a4),   \
-      (long long)(a5))
-#define __sanitizer_syscall_post_compat_10_osemsys(res, which, a2, a3, a4, a5) \
-  __sanitizer_syscall_post_impl_compat_10_osemsys(                             \
-      res, (long long)(which), (long long)(a2), (long long)(a3),               \
-      (long long)(a4), (long long)(a5))
-#else
-/* syscall 169 has been skipped */
-#endif
-#if !defined(_LP64)
-#define __sanitizer_syscall_pre_compat_10_omsgsys(which, a2, a3, a4, a5, a6)   \
-  __sanitizer_syscall_pre_impl_compat_10_omsgsys(                              \
-      (long long)(which), (long long)(a2), (long long)(a3), (long long)(a4),   \
-      (long long)(a5), (long long)(a6))
-#define __sanitizer_syscall_post_compat_10_omsgsys(res, which, a2, a3, a4, a5, \
-                                                   a6)                         \
-  __sanitizer_syscall_post_impl_compat_10_omsgsys(                             \
-      res, (long long)(which), (long long)(a2), (long long)(a3),               \
-      (long long)(a4), (long long)(a5), (long long)(a6))
-#else
-/* syscall 170 has been skipped */
-#endif
-#if !defined(_LP64)
-#define __sanitizer_syscall_pre_compat_10_oshmsys(which, a2, a3, a4)           \
-  __sanitizer_syscall_pre_impl_compat_10_oshmsys(                              \
-      (long long)(which), (long long)(a2), (long long)(a3), (long long)(a4))
-#define __sanitizer_syscall_post_compat_10_oshmsys(res, which, a2, a3, a4)     \
-  __sanitizer_syscall_post_impl_compat_10_oshmsys(                             \
-      res, (long long)(which), (long long)(a2), (long long)(a3),               \
-      (long long)(a4))
-#else
-/* syscall 171 has been skipped */
-#endif
-/* syscall 172 has been skipped */
-#define __sanitizer_syscall_pre_pread(fd, buf, nbyte, PAD, offset)             \
-  __sanitizer_syscall_pre_impl_pread((long long)(fd), (long long)(buf),        \
-                                     (long long)(nbyte), (long long)(PAD),     \
-                                     (long long)(offset))
-#define __sanitizer_syscall_post_pread(res, fd, buf, nbyte, PAD, offset)       \
-  __sanitizer_syscall_post_impl_pread(res, (long long)(fd), (long long)(buf),  \
-                                      (long long)(nbyte), (long long)(PAD),    \
-                                      (long long)(offset))
-#define __sanitizer_syscall_pre_pwrite(fd, buf, nbyte, PAD, offset)            \
-  __sanitizer_syscall_pre_impl_pwrite((long long)(fd), (long long)(buf),       \
-                                      (long long)(nbyte), (long long)(PAD),    \
-                                      (long long)(offset))
-#define __sanitizer_syscall_post_pwrite(res, fd, buf, nbyte, PAD, offset)      \
-  __sanitizer_syscall_post_impl_pwrite(res, (long long)(fd), (long long)(buf), \
-                                       (long long)(nbyte), (long long)(PAD),   \
-                                       (long long)(offset))
-#define __sanitizer_syscall_pre_compat_30_ntp_gettime(ntvp)                    \
-  __sanitizer_syscall_pre_impl_compat_30_ntp_gettime((long long)(ntvp))
-#define __sanitizer_syscall_post_compat_30_ntp_gettime(res, ntvp)              \
-  __sanitizer_syscall_post_impl_compat_30_ntp_gettime(res, (long long)(ntvp))
-#if defined(NTP) || !defined(_KERNEL_OPT)
-#define __sanitizer_syscall_pre_ntp_adjtime(tp)                                \
-  __sanitizer_syscall_pre_impl_ntp_adjtime((long long)(tp))
-#define __sanitizer_syscall_post_ntp_adjtime(res, tp)                          \
-  __sanitizer_syscall_post_impl_ntp_adjtime(res, (long long)(tp))
-#else
-/* syscall 176 has been skipped */
-#endif
-/* syscall 177 has been skipped */
-/* syscall 178 has been skipped */
-/* syscall 179 has been skipped */
-/* syscall 180 has been skipped */
-#define __sanitizer_syscall_pre_setgid(gid)                                    \
-  __sanitizer_syscall_pre_impl_setgid((long long)(gid))
-#define __sanitizer_syscall_post_setgid(res, gid)                              \
-  __sanitizer_syscall_post_impl_setgid(res, (long long)(gid))
-#define __sanitizer_syscall_pre_setegid(egid)                                  \
-  __sanitizer_syscall_pre_impl_setegid((long long)(egid))
-#define __sanitizer_syscall_post_setegid(res, egid)                            \
-  __sanitizer_syscall_post_impl_setegid(res, (long long)(egid))
-#define __sanitizer_syscall_pre_seteuid(euid)                                  \
-  __sanitizer_syscall_pre_impl_seteuid((long long)(euid))
-#define __sanitizer_syscall_post_seteuid(res, euid)                            \
-  __sanitizer_syscall_post_impl_seteuid(res, (long long)(euid))
-#define __sanitizer_syscall_pre_lfs_bmapv(fsidp, blkiov, blkcnt)               \
-  __sanitizer_syscall_pre_impl_lfs_bmapv(                                      \
-      (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
-#define __sanitizer_syscall_post_lfs_bmapv(res, fsidp, blkiov, blkcnt)         \
-  __sanitizer_syscall_post_impl_lfs_bmapv(                                     \
-      res, (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
-#define __sanitizer_syscall_pre_lfs_markv(fsidp, blkiov, blkcnt)               \
-  __sanitizer_syscall_pre_impl_lfs_markv(                                      \
-      (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
-#define __sanitizer_syscall_post_lfs_markv(res, fsidp, blkiov, blkcnt)         \
-  __sanitizer_syscall_post_impl_lfs_markv(                                     \
-      res, (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
-#define __sanitizer_syscall_pre_lfs_segclean(fsidp, segment)                   \
-  __sanitizer_syscall_pre_impl_lfs_segclean((long long)(fsidp),                \
-                                            (long long)(segment))
-#define __sanitizer_syscall_post_lfs_segclean(res, fsidp, segment)             \
-  __sanitizer_syscall_post_impl_lfs_segclean(res, (long long)(fsidp),          \
-                                             (long long)(segment))
-#define __sanitizer_syscall_pre_compat_50_lfs_segwait(fsidp, tv)               \
-  __sanitizer_syscall_pre_impl_compat_50_lfs_segwait((long long)(fsidp),       \
-                                                     (long long)(tv))
-#define __sanitizer_syscall_post_compat_50_lfs_segwait(res, fsidp, tv)         \
-  __sanitizer_syscall_post_impl_compat_50_lfs_segwait(res, (long long)(fsidp), \
-                                                      (long long)(tv))
-#define __sanitizer_syscall_pre_compat_12_stat12(path, ub)                     \
-  __sanitizer_syscall_pre_impl_compat_12_stat12((long long)(path),             \
-                                                (long long)(ub))
-#define __sanitizer_syscall_post_compat_12_stat12(res, path, ub)               \
-  __sanitizer_syscall_post_impl_compat_12_stat12(res, (long long)(path),       \
-                                                 (long long)(ub))
-#define __sanitizer_syscall_pre_compat_12_fstat12(fd, sb)                      \
-  __sanitizer_syscall_pre_impl_compat_12_fstat12((long long)(fd),              \
-                                                 (long long)(sb))
-#define __sanitizer_syscall_post_compat_12_fstat12(res, fd, sb)                \
-  __sanitizer_syscall_post_impl_compat_12_fstat12(res, (long long)(fd),        \
-                                                  (long long)(sb))
-#define __sanitizer_syscall_pre_compat_12_lstat12(path, ub)                    \
-  __sanitizer_syscall_pre_impl_compat_12_lstat12((long long)(path),            \
-                                                 (long long)(ub))
-#define __sanitizer_syscall_post_compat_12_lstat12(res, path, ub)              \
-  __sanitizer_syscall_post_impl_compat_12_lstat12(res, (long long)(path),      \
-                                                  (long long)(ub))
-#define __sanitizer_syscall_pre_pathconf(path, name)                           \
-  __sanitizer_syscall_pre_impl_pathconf((long long)(path), (long long)(name))
-#define __sanitizer_syscall_post_pathconf(res, path, name)                     \
-  __sanitizer_syscall_post_impl_pathconf(res, (long long)(path),               \
-                                         (long long)(name))
-#define __sanitizer_syscall_pre_fpathconf(fd, name)                            \
-  __sanitizer_syscall_pre_impl_fpathconf((long long)(fd), (long long)(name))
-#define __sanitizer_syscall_post_fpathconf(res, fd, name)                      \
-  __sanitizer_syscall_post_impl_fpathconf(res, (long long)(fd),                \
-                                          (long long)(name))
-#define __sanitizer_syscall_pre_getsockopt2(s, level, name, val, avalsize)     \
-  __sanitizer_syscall_pre_impl_getsockopt2(                                    \
-      (long long)(s), (long long)(level), (long long)(name), (long long)(val), \
-      (long long)(avalsize))
-#define __sanitizer_syscall_post_getsockopt2(res, s, level, name, val,         \
-                                             avalsize)                         \
-  __sanitizer_syscall_post_impl_getsockopt2(                                   \
-      res, (long long)(s), (long long)(level), (long long)(name),              \
-      (long long)(val), (long long)(avalsize))
-#define __sanitizer_syscall_pre_getrlimit(which, rlp)                          \
-  __sanitizer_syscall_pre_impl_getrlimit((long long)(which), (long long)(rlp))
-#define __sanitizer_syscall_post_getrlimit(res, which, rlp)                    \
-  __sanitizer_syscall_post_impl_getrlimit(res, (long long)(which),             \
-                                          (long long)(rlp))
-#define __sanitizer_syscall_pre_setrlimit(which, rlp)                          \
-  __sanitizer_syscall_pre_impl_setrlimit((long long)(which), (long long)(rlp))
-#define __sanitizer_syscall_post_setrlimit(res, which, rlp)                    \
-  __sanitizer_syscall_post_impl_setrlimit(res, (long long)(which),             \
-                                          (long long)(rlp))
-#define __sanitizer_syscall_pre_compat_12_getdirentries(fd, buf, count, basep) \
-  __sanitizer_syscall_pre_impl_compat_12_getdirentries(                        \
-      (long long)(fd), (long long)(buf), (long long)(count),                   \
-      (long long)(basep))
-#define __sanitizer_syscall_post_compat_12_getdirentries(res, fd, buf, count,  \
-                                                         basep)                \
-  __sanitizer_syscall_post_impl_compat_12_getdirentries(                       \
-      res, (long long)(fd), (long long)(buf), (long long)(count),              \
-      (long long)(basep))
-#define __sanitizer_syscall_pre_mmap(addr, len, prot, flags, fd, PAD, pos)     \
-  __sanitizer_syscall_pre_impl_mmap(                                           \
-      (long long)(addr), (long long)(len), (long long)(prot),                  \
-      (long long)(flags), (long long)(fd), (long long)(PAD), (long long)(pos))
-#define __sanitizer_syscall_post_mmap(res, addr, len, prot, flags, fd, PAD,    \
-                                      pos)                                     \
-  __sanitizer_syscall_post_impl_mmap(                                          \
-      res, (long long)(addr), (long long)(len), (long long)(prot),             \
-      (long long)(flags), (long long)(fd), (long long)(PAD), (long long)(pos))
-#define __sanitizer_syscall_pre___syscall(code, arg0, arg1, arg2, arg3, arg4,  \
-                                          arg5, arg6, arg7)                    \
-  __sanitizer_syscall_pre_impl___syscall(                                      \
-      (long long)(code), (long long)(arg0), (long long)(arg1),                 \
-      (long long)(arg2), (long long)(arg3), (long long)(arg4),                 \
-      (long long)(arg5), (long long)(arg6), (long long)(arg7))
-#define __sanitizer_syscall_post___syscall(res, code, arg0, arg1, arg2, arg3,  \
-                                           arg4, arg5, arg6, arg7)             \
-  __sanitizer_syscall_post_impl___syscall(                                     \
-      res, (long long)(code), (long long)(arg0), (long long)(arg1),            \
-      (long long)(arg2), (long long)(arg3), (long long)(arg4),                 \
-      (long long)(arg5), (long long)(arg6), (long long)(arg7))
-#define __sanitizer_syscall_pre_lseek(fd, PAD, offset, whence)                 \
-  __sanitizer_syscall_pre_impl_lseek((long long)(fd), (long long)(PAD),        \
-                                     (long long)(offset), (long long)(whence))
-#define __sanitizer_syscall_post_lseek(res, fd, PAD, offset, whence)           \
-  __sanitizer_syscall_post_impl_lseek(res, (long long)(fd), (long long)(PAD),  \
-                                      (long long)(offset),                     \
-                                      (long long)(whence))
-#define __sanitizer_syscall_pre_truncate(path, PAD, length)                    \
-  __sanitizer_syscall_pre_impl_truncate((long long)(path), (long long)(PAD),   \
-                                        (long long)(length))
-#define __sanitizer_syscall_post_truncate(res, path, PAD, length)              \
-  __sanitizer_syscall_post_impl_truncate(                                      \
-      res, (long long)(path), (long long)(PAD), (long long)(length))
-#define __sanitizer_syscall_pre_ftruncate(fd, PAD, length)                     \
-  __sanitizer_syscall_pre_impl_ftruncate((long long)(fd), (long long)(PAD),    \
-                                         (long long)(length))
-#define __sanitizer_syscall_post_ftruncate(res, fd, PAD, length)               \
-  __sanitizer_syscall_post_impl_ftruncate(                                     \
-      res, (long long)(fd), (long long)(PAD), (long long)(length))
-#define __sanitizer_syscall_pre___sysctl(name, namelen, oldv, oldlenp, newv,   \
-                                         newlen)                               \
-  __sanitizer_syscall_pre_impl___sysctl(                                       \
-      (long long)(name), (long long)(namelen), (long long)(oldv),              \
-      (long long)(oldlenp), (long long)(newv), (long long)(newlen))
-#define __sanitizer_syscall_post___sysctl(res, name, namelen, oldv, oldlenp,   \
-                                          newv, newlen)                        \
-  __sanitizer_syscall_post_impl___sysctl(                                      \
-      res, (long long)(name), (long long)(namelen), (long long)(oldv),         \
-      (long long)(oldlenp), (long long)(newv), (long long)(newlen))
-#define __sanitizer_syscall_pre_mlock(addr, len)                               \
-  __sanitizer_syscall_pre_impl_mlock((long long)(addr), (long long)(len))
-#define __sanitizer_syscall_post_mlock(res, addr, len)                         \
-  __sanitizer_syscall_post_impl_mlock(res, (long long)(addr), (long long)(len))
-#define __sanitizer_syscall_pre_munlock(addr, len)                             \
-  __sanitizer_syscall_pre_impl_munlock((long long)(addr), (long long)(len))
-#define __sanitizer_syscall_post_munlock(res, addr, len)                       \
-  __sanitizer_syscall_post_impl_munlock(res, (long long)(addr),                \
-                                        (long long)(len))
-#define __sanitizer_syscall_pre_undelete(path)                                 \
-  __sanitizer_syscall_pre_impl_undelete((long long)(path))
-#define __sanitizer_syscall_post_undelete(res, path)                           \
-  __sanitizer_syscall_post_impl_undelete(res, (long long)(path))
-#define __sanitizer_syscall_pre_compat_50_futimes(fd, tptr)                    \
-  __sanitizer_syscall_pre_impl_compat_50_futimes((long long)(fd),              \
-                                                 (long long)(tptr))
-#define __sanitizer_syscall_post_compat_50_futimes(res, fd, tptr)              \
-  __sanitizer_syscall_post_impl_compat_50_futimes(res, (long long)(fd),        \
-                                                  (long long)(tptr))
-#define __sanitizer_syscall_pre_getpgid(pid)                                   \
-  __sanitizer_syscall_pre_impl_getpgid((long long)(pid))
-#define __sanitizer_syscall_post_getpgid(res, pid)                             \
-  __sanitizer_syscall_post_impl_getpgid(res, (long long)(pid))
-#define __sanitizer_syscall_pre_reboot(opt, bootstr)                           \
-  __sanitizer_syscall_pre_impl_reboot((long long)(opt), (long long)(bootstr))
-#define __sanitizer_syscall_post_reboot(res, opt, bootstr)                     \
-  __sanitizer_syscall_post_impl_reboot(res, (long long)(opt),                  \
-                                       (long long)(bootstr))
-#define __sanitizer_syscall_pre_poll(fds, nfds, timeout)                       \
-  __sanitizer_syscall_pre_impl_poll((long long)(fds), (long long)(nfds),       \
-                                    (long long)(timeout))
-#define __sanitizer_syscall_post_poll(res, fds, nfds, timeout)                 \
-  __sanitizer_syscall_post_impl_poll(res, (long long)(fds), (long long)(nfds), \
-                                     (long long)(timeout))
-#define __sanitizer_syscall_pre_afssys(id, a1, a2, a3, a4, a5, a6)             \
-  __sanitizer_syscall_pre_impl_afssys(                                         \
-      (long long)(id), (long long)(a1), (long long)(a2), (long long)(a3),      \
-      (long long)(a4), (long long)(a5), (long long)(a6))
-#define __sanitizer_syscall_post_afssys(res, id, a1, a2, a3, a4, a5, a6)       \
-  __sanitizer_syscall_post_impl_afssys(                                        \
-      res, (long long)(id), (long long)(a1), (long long)(a2), (long long)(a3), \
-      (long long)(a4), (long long)(a5), (long long)(a6))
-/* syscall 211 has been skipped */
-/* syscall 212 has been skipped */
-/* syscall 213 has been skipped */
-/* syscall 214 has been skipped */
-/* syscall 215 has been skipped */
-/* syscall 216 has been skipped */
-/* syscall 217 has been skipped */
-/* syscall 218 has been skipped */
-/* syscall 219 has been skipped */
-#define __sanitizer_syscall_pre_compat_14___semctl(semid, semnum, cmd, arg)    \
-  __sanitizer_syscall_pre_impl_compat_14___semctl(                             \
-      (long long)(semid), (long long)(semnum), (long long)(cmd),               \
-      (long long)(arg))
-#define __sanitizer_syscall_post_compat_14___semctl(res, semid, semnum, cmd,   \
-                                                    arg)                       \
-  __sanitizer_syscall_post_impl_compat_14___semctl(                            \
-      res, (long long)(semid), (long long)(semnum), (long long)(cmd),          \
-      (long long)(arg))
-#define __sanitizer_syscall_pre_semget(key, nsems, semflg)                     \
-  __sanitizer_syscall_pre_impl_semget((long long)(key), (long long)(nsems),    \
-                                      (long long)(semflg))
-#define __sanitizer_syscall_post_semget(res, key, nsems, semflg)               \
-  __sanitizer_syscall_post_impl_semget(                                        \
-      res, (long long)(key), (long long)(nsems), (long long)(semflg))
-#define __sanitizer_syscall_pre_semop(semid, sops, nsops)                      \
-  __sanitizer_syscall_pre_impl_semop((long long)(semid), (long long)(sops),    \
-                                     (long long)(nsops))
-#define __sanitizer_syscall_post_semop(res, semid, sops, nsops)                \
-  __sanitizer_syscall_post_impl_semop(res, (long long)(semid),                 \
-                                      (long long)(sops), (long long)(nsops))
-#define __sanitizer_syscall_pre_semconfig(flag)                                \
-  __sanitizer_syscall_pre_impl_semconfig((long long)(flag))
-#define __sanitizer_syscall_post_semconfig(res, flag)                          \
-  __sanitizer_syscall_post_impl_semconfig(res, (long long)(flag))
-#define __sanitizer_syscall_pre_compat_14_msgctl(msqid, cmd, buf)              \
-  __sanitizer_syscall_pre_impl_compat_14_msgctl(                               \
-      (long long)(msqid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_post_compat_14_msgctl(res, msqid, cmd, buf)        \
-  __sanitizer_syscall_post_impl_compat_14_msgctl(                              \
-      res, (long long)(msqid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_pre_msgget(key, msgflg)                            \
-  __sanitizer_syscall_pre_impl_msgget((long long)(key), (long long)(msgflg))
-#define __sanitizer_syscall_post_msgget(res, key, msgflg)                      \
-  __sanitizer_syscall_post_impl_msgget(res, (long long)(key),                  \
-                                       (long long)(msgflg))
-#define __sanitizer_syscall_pre_msgsnd(msqid, msgp, msgsz, msgflg)             \
-  __sanitizer_syscall_pre_impl_msgsnd((long long)(msqid), (long long)(msgp),   \
-                                      (long long)(msgsz), (long long)(msgflg))
-#define __sanitizer_syscall_post_msgsnd(res, msqid, msgp, msgsz, msgflg)       \
-  __sanitizer_syscall_post_impl_msgsnd(res, (long long)(msqid),                \
-                                       (long long)(msgp), (long long)(msgsz),  \
-                                       (long long)(msgflg))
-#define __sanitizer_syscall_pre_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg)     \
-  __sanitizer_syscall_pre_impl_msgrcv((long long)(msqid), (long long)(msgp),   \
-                                      (long long)(msgsz), (long long)(msgtyp), \
-                                      (long long)(msgflg))
-#define __sanitizer_syscall_post_msgrcv(res, msqid, msgp, msgsz, msgtyp,       \
-                                        msgflg)                                \
-  __sanitizer_syscall_post_impl_msgrcv(                                        \
-      res, (long long)(msqid), (long long)(msgp), (long long)(msgsz),          \
-      (long long)(msgtyp), (long long)(msgflg))
-#define __sanitizer_syscall_pre_shmat(shmid, shmaddr, shmflg)                  \
-  __sanitizer_syscall_pre_impl_shmat((long long)(shmid), (long long)(shmaddr), \
-                                     (long long)(shmflg))
-#define __sanitizer_syscall_post_shmat(res, shmid, shmaddr, shmflg)            \
-  __sanitizer_syscall_post_impl_shmat(                                         \
-      res, (long long)(shmid), (long long)(shmaddr), (long long)(shmflg))
-#define __sanitizer_syscall_pre_compat_14_shmctl(shmid, cmd, buf)              \
-  __sanitizer_syscall_pre_impl_compat_14_shmctl(                               \
-      (long long)(shmid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_post_compat_14_shmctl(res, shmid, cmd, buf)        \
-  __sanitizer_syscall_post_impl_compat_14_shmctl(                              \
-      res, (long long)(shmid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_pre_shmdt(shmaddr)                                 \
-  __sanitizer_syscall_pre_impl_shmdt((long long)(shmaddr))
-#define __sanitizer_syscall_post_shmdt(res, shmaddr)                           \
-  __sanitizer_syscall_post_impl_shmdt(res, (long long)(shmaddr))
-#define __sanitizer_syscall_pre_shmget(key, size, shmflg)                      \
-  __sanitizer_syscall_pre_impl_shmget((long long)(key), (long long)(size),     \
-                                      (long long)(shmflg))
-#define __sanitizer_syscall_post_shmget(res, key, size, shmflg)                \
-  __sanitizer_syscall_post_impl_shmget(res, (long long)(key),                  \
-                                       (long long)(size), (long long)(shmflg))
-#define __sanitizer_syscall_pre_compat_50_clock_gettime(clock_id, tp)          \
-  __sanitizer_syscall_pre_impl_compat_50_clock_gettime((long long)(clock_id),  \
-                                                       (long long)(tp))
-#define __sanitizer_syscall_post_compat_50_clock_gettime(res, clock_id, tp)    \
-  __sanitizer_syscall_post_impl_compat_50_clock_gettime(                       \
-      res, (long long)(clock_id), (long long)(tp))
-#define __sanitizer_syscall_pre_compat_50_clock_settime(clock_id, tp)          \
-  __sanitizer_syscall_pre_impl_compat_50_clock_settime((long long)(clock_id),  \
-                                                       (long long)(tp))
-#define __sanitizer_syscall_post_compat_50_clock_settime(res, clock_id, tp)    \
-  __sanitizer_syscall_post_impl_compat_50_clock_settime(                       \
-      res, (long long)(clock_id), (long long)(tp))
-#define __sanitizer_syscall_pre_compat_50_clock_getres(clock_id, tp)           \
-  __sanitizer_syscall_pre_impl_compat_50_clock_getres((long long)(clock_id),   \
-                                                      (long long)(tp))
-#define __sanitizer_syscall_post_compat_50_clock_getres(res, clock_id, tp)     \
-  __sanitizer_syscall_post_impl_compat_50_clock_getres(                        \
-      res, (long long)(clock_id), (long long)(tp))
-#define __sanitizer_syscall_pre_timer_create(clock_id, evp, timerid)           \
-  __sanitizer_syscall_pre_impl_timer_create(                                   \
-      (long long)(clock_id), (long long)(evp), (long long)(timerid))
-#define __sanitizer_syscall_post_timer_create(res, clock_id, evp, timerid)     \
-  __sanitizer_syscall_post_impl_timer_create(                                  \
-      res, (long long)(clock_id), (long long)(evp), (long long)(timerid))
-#define __sanitizer_syscall_pre_timer_delete(timerid)                          \
-  __sanitizer_syscall_pre_impl_timer_delete((long long)(timerid))
-#define __sanitizer_syscall_post_timer_delete(res, timerid)                    \
-  __sanitizer_syscall_post_impl_timer_delete(res, (long long)(timerid))
-#define __sanitizer_syscall_pre_compat_50_timer_settime(timerid, flags, value, \
-                                                        ovalue)                \
-  __sanitizer_syscall_pre_impl_compat_50_timer_settime(                        \
-      (long long)(timerid), (long long)(flags), (long long)(value),            \
-      (long long)(ovalue))
-#define __sanitizer_syscall_post_compat_50_timer_settime(res, timerid, flags,  \
-                                                         value, ovalue)        \
-  __sanitizer_syscall_post_impl_compat_50_timer_settime(                       \
-      res, (long long)(timerid), (long long)(flags), (long long)(value),       \
-      (long long)(ovalue))
-#define __sanitizer_syscall_pre_compat_50_timer_gettime(timerid, value)        \
-  __sanitizer_syscall_pre_impl_compat_50_timer_gettime((long long)(timerid),   \
-                                                       (long long)(value))
-#define __sanitizer_syscall_post_compat_50_timer_gettime(res, timerid, value)  \
-  __sanitizer_syscall_post_impl_compat_50_timer_gettime(                       \
-      res, (long long)(timerid), (long long)(value))
-#define __sanitizer_syscall_pre_timer_getoverrun(timerid)                      \
-  __sanitizer_syscall_pre_impl_timer_getoverrun((long long)(timerid))
-#define __sanitizer_syscall_post_timer_getoverrun(res, timerid)                \
-  __sanitizer_syscall_post_impl_timer_getoverrun(res, (long long)(timerid))
-#define __sanitizer_syscall_pre_compat_50_nanosleep(rqtp, rmtp)                \
-  __sanitizer_syscall_pre_impl_compat_50_nanosleep((long long)(rqtp),          \
-                                                   (long long)(rmtp))
-#define __sanitizer_syscall_post_compat_50_nanosleep(res, rqtp, rmtp)          \
-  __sanitizer_syscall_post_impl_compat_50_nanosleep(res, (long long)(rqtp),    \
-                                                    (long long)(rmtp))
-#define __sanitizer_syscall_pre_fdatasync(fd)                                  \
-  __sanitizer_syscall_pre_impl_fdatasync((long long)(fd))
-#define __sanitizer_syscall_post_fdatasync(res, fd)                            \
-  __sanitizer_syscall_post_impl_fdatasync(res, (long long)(fd))
-#define __sanitizer_syscall_pre_mlockall(flags)                                \
-  __sanitizer_syscall_pre_impl_mlockall((long long)(flags))
-#define __sanitizer_syscall_post_mlockall(res, flags)                          \
-  __sanitizer_syscall_post_impl_mlockall(res, (long long)(flags))
-#define __sanitizer_syscall_pre_munlockall()                                   \
-  __sanitizer_syscall_pre_impl_munlockall()
-#define __sanitizer_syscall_post_munlockall(res)                               \
-  __sanitizer_syscall_post_impl_munlockall(res)
-#define __sanitizer_syscall_pre_compat_50___sigtimedwait(set, info, timeout)   \
-  __sanitizer_syscall_pre_impl_compat_50___sigtimedwait(                       \
-      (long long)(set), (long long)(info), (long long)(timeout))
-#define __sanitizer_syscall_post_compat_50___sigtimedwait(res, set, info,      \
-                                                          timeout)             \
-  __sanitizer_syscall_post_impl_compat_50___sigtimedwait(                      \
-      res, (long long)(set), (long long)(info), (long long)(timeout))
-#define __sanitizer_syscall_pre_sigqueueinfo(pid, info)                        \
-  __sanitizer_syscall_pre_impl_sigqueueinfo((long long)(pid), (long long)(info))
-#define __sanitizer_syscall_post_sigqueueinfo(res, pid, info)                  \
-  __sanitizer_syscall_post_impl_sigqueueinfo(res, (long long)(pid),            \
-                                             (long long)(info))
-#define __sanitizer_syscall_pre_modctl(cmd, arg)                               \
-  __sanitizer_syscall_pre_impl_modctl((long long)(cmd), (long long)(arg))
-#define __sanitizer_syscall_post_modctl(res, cmd, arg)                         \
-  __sanitizer_syscall_post_impl_modctl(res, (long long)(cmd), (long long)(arg))
-#define __sanitizer_syscall_pre__ksem_init(value, idp)                         \
-  __sanitizer_syscall_pre_impl__ksem_init((long long)(value), (long long)(idp))
-#define __sanitizer_syscall_post__ksem_init(res, value, idp)                   \
-  __sanitizer_syscall_post_impl__ksem_init(res, (long long)(value),            \
-                                           (long long)(idp))
-#define __sanitizer_syscall_pre__ksem_open(name, oflag, mode, value, idp)      \
-  __sanitizer_syscall_pre_impl__ksem_open(                                     \
-      (long long)(name), (long long)(oflag), (long long)(mode),                \
-      (long long)(value), (long long)(idp))
-#define __sanitizer_syscall_post__ksem_open(res, name, oflag, mode, value,     \
-                                            idp)                               \
-  __sanitizer_syscall_post_impl__ksem_open(                                    \
-      res, (long long)(name), (long long)(oflag), (long long)(mode),           \
-      (long long)(value), (long long)(idp))
-#define __sanitizer_syscall_pre__ksem_unlink(name)                             \
-  __sanitizer_syscall_pre_impl__ksem_unlink((long long)(name))
-#define __sanitizer_syscall_post__ksem_unlink(res, name)                       \
-  __sanitizer_syscall_post_impl__ksem_unlink(res, (long long)(name))
-#define __sanitizer_syscall_pre__ksem_close(id)                                \
-  __sanitizer_syscall_pre_impl__ksem_close((long long)(id))
-#define __sanitizer_syscall_post__ksem_close(res, id)                          \
-  __sanitizer_syscall_post_impl__ksem_close(res, (long long)(id))
-#define __sanitizer_syscall_pre__ksem_post(id)                                 \
-  __sanitizer_syscall_pre_impl__ksem_post((long long)(id))
-#define __sanitizer_syscall_post__ksem_post(res, id)                           \
-  __sanitizer_syscall_post_impl__ksem_post(res, (long long)(id))
-#define __sanitizer_syscall_pre__ksem_wait(id)                                 \
-  __sanitizer_syscall_pre_impl__ksem_wait((long long)(id))
-#define __sanitizer_syscall_post__ksem_wait(res, id)                           \
-  __sanitizer_syscall_post_impl__ksem_wait(res, (long long)(id))
-#define __sanitizer_syscall_pre__ksem_trywait(id)                              \
-  __sanitizer_syscall_pre_impl__ksem_trywait((long long)(id))
-#define __sanitizer_syscall_post__ksem_trywait(res, id)                        \
-  __sanitizer_syscall_post_impl__ksem_trywait(res, (long long)(id))
-#define __sanitizer_syscall_pre__ksem_getvalue(id, value)                      \
-  __sanitizer_syscall_pre_impl__ksem_getvalue((long long)(id),                 \
-                                              (long long)(value))
-#define __sanitizer_syscall_post__ksem_getvalue(res, id, value)                \
-  __sanitizer_syscall_post_impl__ksem_getvalue(res, (long long)(id),           \
-                                               (long long)(value))
-#define __sanitizer_syscall_pre__ksem_destroy(id)                              \
-  __sanitizer_syscall_pre_impl__ksem_destroy((long long)(id))
-#define __sanitizer_syscall_post__ksem_destroy(res, id)                        \
-  __sanitizer_syscall_post_impl__ksem_destroy(res, (long long)(id))
-#define __sanitizer_syscall_pre__ksem_timedwait(id, abstime)                   \
-  __sanitizer_syscall_pre_impl__ksem_timedwait((long long)(id),                \
-                                               (long long)(abstime))
-#define __sanitizer_syscall_post__ksem_timedwait(res, id, abstime)             \
-  __sanitizer_syscall_post_impl__ksem_timedwait(res, (long long)(id),          \
-                                                (long long)(abstime))
-#define __sanitizer_syscall_pre_mq_open(name, oflag, mode, attr)               \
-  __sanitizer_syscall_pre_impl_mq_open((long long)(name), (long long)(oflag),  \
-                                       (long long)(mode), (long long)(attr))
-#define __sanitizer_syscall_post_mq_open(res, name, oflag, mode, attr)         \
-  __sanitizer_syscall_post_impl_mq_open(res, (long long)(name),                \
-                                        (long long)(oflag), (long long)(mode), \
-                                        (long long)(attr))
-#define __sanitizer_syscall_pre_mq_close(mqdes)                                \
-  __sanitizer_syscall_pre_impl_mq_close((long long)(mqdes))
-#define __sanitizer_syscall_post_mq_close(res, mqdes)                          \
-  __sanitizer_syscall_post_impl_mq_close(res, (long long)(mqdes))
-#define __sanitizer_syscall_pre_mq_unlink(name)                                \
-  __sanitizer_syscall_pre_impl_mq_unlink((long long)(name))
-#define __sanitizer_syscall_post_mq_unlink(res, name)                          \
-  __sanitizer_syscall_post_impl_mq_unlink(res, (long long)(name))
-#define __sanitizer_syscall_pre_mq_getattr(mqdes, mqstat)                      \
-  __sanitizer_syscall_pre_impl_mq_getattr((long long)(mqdes),                  \
-                                          (long long)(mqstat))
-#define __sanitizer_syscall_post_mq_getattr(res, mqdes, mqstat)                \
-  __sanitizer_syscall_post_impl_mq_getattr(res, (long long)(mqdes),            \
-                                           (long long)(mqstat))
-#define __sanitizer_syscall_pre_mq_setattr(mqdes, mqstat, omqstat)             \
-  __sanitizer_syscall_pre_impl_mq_setattr(                                     \
-      (long long)(mqdes), (long long)(mqstat), (long long)(omqstat))
-#define __sanitizer_syscall_post_mq_setattr(res, mqdes, mqstat, omqstat)       \
-  __sanitizer_syscall_post_impl_mq_setattr(                                    \
-      res, (long long)(mqdes), (long long)(mqstat), (long long)(omqstat))
-#define __sanitizer_syscall_pre_mq_notify(mqdes, notification)                 \
-  __sanitizer_syscall_pre_impl_mq_notify((long long)(mqdes),                   \
-                                         (long long)(notification))
-#define __sanitizer_syscall_post_mq_notify(res, mqdes, notification)           \
-  __sanitizer_syscall_post_impl_mq_notify(res, (long long)(mqdes),             \
-                                          (long long)(notification))
-#define __sanitizer_syscall_pre_mq_send(mqdes, msg_ptr, msg_len, msg_prio)     \
-  __sanitizer_syscall_pre_impl_mq_send(                                        \
-      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),          \
-      (long long)(msg_prio))
-#define __sanitizer_syscall_post_mq_send(res, mqdes, msg_ptr, msg_len,         \
-                                         msg_prio)                             \
-  __sanitizer_syscall_post_impl_mq_send(                                       \
-      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),     \
-      (long long)(msg_prio))
-#define __sanitizer_syscall_pre_mq_receive(mqdes, msg_ptr, msg_len, msg_prio)  \
-  __sanitizer_syscall_pre_impl_mq_receive(                                     \
-      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),          \
-      (long long)(msg_prio))
-#define __sanitizer_syscall_post_mq_receive(res, mqdes, msg_ptr, msg_len,      \
-                                            msg_prio)                          \
-  __sanitizer_syscall_post_impl_mq_receive(                                    \
-      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),     \
-      (long long)(msg_prio))
-#define __sanitizer_syscall_pre_compat_50_mq_timedsend(                        \
-    mqdes, msg_ptr, msg_len, msg_prio, abs_timeout)                            \
-  __sanitizer_syscall_pre_impl_compat_50_mq_timedsend(                         \
-      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),          \
-      (long long)(msg_prio), (long long)(abs_timeout))
-#define __sanitizer_syscall_post_compat_50_mq_timedsend(                       \
-    res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout)                       \
-  __sanitizer_syscall_post_impl_compat_50_mq_timedsend(                        \
-      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),     \
-      (long long)(msg_prio), (long long)(abs_timeout))
-#define __sanitizer_syscall_pre_compat_50_mq_timedreceive(                     \
-    mqdes, msg_ptr, msg_len, msg_prio, abs_timeout)                            \
-  __sanitizer_syscall_pre_impl_compat_50_mq_timedreceive(                      \
-      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),          \
-      (long long)(msg_prio), (long long)(abs_timeout))
-#define __sanitizer_syscall_post_compat_50_mq_timedreceive(                    \
-    res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout)                       \
-  __sanitizer_syscall_post_impl_compat_50_mq_timedreceive(                     \
-      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),     \
-      (long long)(msg_prio), (long long)(abs_timeout))
-/* syscall 267 has been skipped */
-/* syscall 268 has been skipped */
-/* syscall 269 has been skipped */
-#define __sanitizer_syscall_pre___posix_rename(from, to)                       \
-  __sanitizer_syscall_pre_impl___posix_rename((long long)(from),               \
-                                              (long long)(to))
-#define __sanitizer_syscall_post___posix_rename(res, from, to)                 \
-  __sanitizer_syscall_post_impl___posix_rename(res, (long long)(from),         \
-                                               (long long)(to))
-#define __sanitizer_syscall_pre_swapctl(cmd, arg, misc)                        \
-  __sanitizer_syscall_pre_impl_swapctl((long long)(cmd), (long long)(arg),     \
-                                       (long long)(misc))
-#define __sanitizer_syscall_post_swapctl(res, cmd, arg, misc)                  \
-  __sanitizer_syscall_post_impl_swapctl(res, (long long)(cmd),                 \
-                                        (long long)(arg), (long long)(misc))
-#define __sanitizer_syscall_pre_compat_30_getdents(fd, buf, count)             \
-  __sanitizer_syscall_pre_impl_compat_30_getdents(                             \
-      (long long)(fd), (long long)(buf), (long long)(count))
-#define __sanitizer_syscall_post_compat_30_getdents(res, fd, buf, count)       \
-  __sanitizer_syscall_post_impl_compat_30_getdents(                            \
-      res, (long long)(fd), (long long)(buf), (long long)(count))
-#define __sanitizer_syscall_pre_minherit(addr, len, inherit)                   \
-  __sanitizer_syscall_pre_impl_minherit((long long)(addr), (long long)(len),   \
-                                        (long long)(inherit))
-#define __sanitizer_syscall_post_minherit(res, addr, len, inherit)             \
-  __sanitizer_syscall_post_impl_minherit(                                      \
-      res, (long long)(addr), (long long)(len), (long long)(inherit))
-#define __sanitizer_syscall_pre_lchmod(path, mode)                             \
-  __sanitizer_syscall_pre_impl_lchmod((long long)(path), (long long)(mode))
-#define __sanitizer_syscall_post_lchmod(res, path, mode)                       \
-  __sanitizer_syscall_post_impl_lchmod(res, (long long)(path),                 \
-                                       (long long)(mode))
-#define __sanitizer_syscall_pre_lchown(path, uid, gid)                         \
-  __sanitizer_syscall_pre_impl_lchown((long long)(path), (long long)(uid),     \
-                                      (long long)(gid))
-#define __sanitizer_syscall_post_lchown(res, path, uid, gid)                   \
-  __sanitizer_syscall_post_impl_lchown(res, (long long)(path),                 \
-                                       (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_pre_compat_50_lutimes(path, tptr)                  \
-  __sanitizer_syscall_pre_impl_compat_50_lutimes((long long)(path),            \
-                                                 (long long)(tptr))
-#define __sanitizer_syscall_post_compat_50_lutimes(res, path, tptr)            \
-  __sanitizer_syscall_post_impl_compat_50_lutimes(res, (long long)(path),      \
-                                                  (long long)(tptr))
-#define __sanitizer_syscall_pre___msync13(addr, len, flags)                    \
-  __sanitizer_syscall_pre_impl___msync13((long long)(addr), (long long)(len),  \
-                                         (long long)(flags))
-#define __sanitizer_syscall_post___msync13(res, addr, len, flags)              \
-  __sanitizer_syscall_post_impl___msync13(                                     \
-      res, (long long)(addr), (long long)(len), (long long)(flags))
-#define __sanitizer_syscall_pre_compat_30___stat13(path, ub)                   \
-  __sanitizer_syscall_pre_impl_compat_30___stat13((long long)(path),           \
-                                                  (long long)(ub))
-#define __sanitizer_syscall_post_compat_30___stat13(res, path, ub)             \
-  __sanitizer_syscall_post_impl_compat_30___stat13(res, (long long)(path),     \
-                                                   (long long)(ub))
-#define __sanitizer_syscall_pre_compat_30___fstat13(fd, sb)                    \
-  __sanitizer_syscall_pre_impl_compat_30___fstat13((long long)(fd),            \
-                                                   (long long)(sb))
-#define __sanitizer_syscall_post_compat_30___fstat13(res, fd, sb)              \
-  __sanitizer_syscall_post_impl_compat_30___fstat13(res, (long long)(fd),      \
-                                                    (long long)(sb))
-#define __sanitizer_syscall_pre_compat_30___lstat13(path, ub)                  \
-  __sanitizer_syscall_pre_impl_compat_30___lstat13((long long)(path),          \
-                                                   (long long)(ub))
-#define __sanitizer_syscall_post_compat_30___lstat13(res, path, ub)            \
-  __sanitizer_syscall_post_impl_compat_30___lstat13(res, (long long)(path),    \
-                                                    (long long)(ub))
-#define __sanitizer_syscall_pre___sigaltstack14(nss, oss)                      \
-  __sanitizer_syscall_pre_impl___sigaltstack14((long long)(nss),               \
-                                               (long long)(oss))
-#define __sanitizer_syscall_post___sigaltstack14(res, nss, oss)                \
-  __sanitizer_syscall_post_impl___sigaltstack14(res, (long long)(nss),         \
-                                                (long long)(oss))
-#define __sanitizer_syscall_pre___vfork14()                                    \
-  __sanitizer_syscall_pre_impl___vfork14()
-#define __sanitizer_syscall_post___vfork14(res)                                \
-  __sanitizer_syscall_post_impl___vfork14(res)
-#define __sanitizer_syscall_pre___posix_chown(path, uid, gid)                  \
-  __sanitizer_syscall_pre_impl___posix_chown(                                  \
-      (long long)(path), (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_post___posix_chown(res, path, uid, gid)            \
-  __sanitizer_syscall_post_impl___posix_chown(                                 \
-      res, (long long)(path), (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_pre___posix_fchown(fd, uid, gid)                   \
-  __sanitizer_syscall_pre_impl___posix_fchown(                                 \
-      (long long)(fd), (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_post___posix_fchown(res, fd, uid, gid)             \
-  __sanitizer_syscall_post_impl___posix_fchown(                                \
-      res, (long long)(fd), (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_pre___posix_lchown(path, uid, gid)                 \
-  __sanitizer_syscall_pre_impl___posix_lchown(                                 \
-      (long long)(path), (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_post___posix_lchown(res, path, uid, gid)           \
-  __sanitizer_syscall_post_impl___posix_lchown(                                \
-      res, (long long)(path), (long long)(uid), (long long)(gid))
-#define __sanitizer_syscall_pre_getsid(pid)                                    \
-  __sanitizer_syscall_pre_impl_getsid((long long)(pid))
-#define __sanitizer_syscall_post_getsid(res, pid)                              \
-  __sanitizer_syscall_post_impl_getsid(res, (long long)(pid))
-#define __sanitizer_syscall_pre___clone(flags, stack)                          \
-  __sanitizer_syscall_pre_impl___clone((long long)(flags), (long long)(stack))
-#define __sanitizer_syscall_post___clone(res, flags, stack)                    \
-  __sanitizer_syscall_post_impl___clone(res, (long long)(flags),               \
-                                        (long long)(stack))
-#define __sanitizer_syscall_pre_fktrace(fd, ops, facs, pid)                    \
-  __sanitizer_syscall_pre_impl_fktrace((long long)(fd), (long long)(ops),      \
-                                       (long long)(facs), (long long)(pid))
-#define __sanitizer_syscall_post_fktrace(res, fd, ops, facs, pid)              \
-  __sanitizer_syscall_post_impl_fktrace(res, (long long)(fd),                  \
-                                        (long long)(ops), (long long)(facs),   \
-                                        (long long)(pid))
-#define __sanitizer_syscall_pre_preadv(fd, iovp, iovcnt, PAD, offset)          \
-  __sanitizer_syscall_pre_impl_preadv((long long)(fd), (long long)(iovp),      \
-                                      (long long)(iovcnt), (long long)(PAD),   \
-                                      (long long)(offset))
-#define __sanitizer_syscall_post_preadv(res, fd, iovp, iovcnt, PAD, offset)    \
-  __sanitizer_syscall_post_impl_preadv(res, (long long)(fd),                   \
-                                       (long long)(iovp), (long long)(iovcnt), \
-                                       (long long)(PAD), (long long)(offset))
-#define __sanitizer_syscall_pre_pwritev(fd, iovp, iovcnt, PAD, offset)         \
-  __sanitizer_syscall_pre_impl_pwritev((long long)(fd), (long long)(iovp),     \
-                                       (long long)(iovcnt), (long long)(PAD),  \
-                                       (long long)(offset))
-#define __sanitizer_syscall_post_pwritev(res, fd, iovp, iovcnt, PAD, offset)   \
-  __sanitizer_syscall_post_impl_pwritev(                                       \
-      res, (long long)(fd), (long long)(iovp), (long long)(iovcnt),            \
-      (long long)(PAD), (long long)(offset))
-#define __sanitizer_syscall_pre_compat_16___sigaction14(signum, nsa, osa)      \
-  __sanitizer_syscall_pre_impl_compat_16___sigaction14(                        \
-      (long long)(signum), (long long)(nsa), (long long)(osa))
-#define __sanitizer_syscall_post_compat_16___sigaction14(res, signum, nsa,     \
-                                                         osa)                  \
-  __sanitizer_syscall_post_impl_compat_16___sigaction14(                       \
-      res, (long long)(signum), (long long)(nsa), (long long)(osa))
-#define __sanitizer_syscall_pre___sigpending14(set)                            \
-  __sanitizer_syscall_pre_impl___sigpending14((long long)(set))
-#define __sanitizer_syscall_post___sigpending14(res, set)                      \
-  __sanitizer_syscall_post_impl___sigpending14(res, (long long)(set))
-#define __sanitizer_syscall_pre___sigprocmask14(how, set, oset)                \
-  __sanitizer_syscall_pre_impl___sigprocmask14(                                \
-      (long long)(how), (long long)(set), (long long)(oset))
-#define __sanitizer_syscall_post___sigprocmask14(res, how, set, oset)          \
-  __sanitizer_syscall_post_impl___sigprocmask14(                               \
-      res, (long long)(how), (long long)(set), (long long)(oset))
-#define __sanitizer_syscall_pre___sigsuspend14(set)                            \
-  __sanitizer_syscall_pre_impl___sigsuspend14((long long)(set))
-#define __sanitizer_syscall_post___sigsuspend14(res, set)                      \
-  __sanitizer_syscall_post_impl___sigsuspend14(res, (long long)(set))
-#define __sanitizer_syscall_pre_compat_16___sigreturn14(sigcntxp)              \
-  __sanitizer_syscall_pre_impl_compat_16___sigreturn14((long long)(sigcntxp))
-#define __sanitizer_syscall_post_compat_16___sigreturn14(res, sigcntxp)        \
-  __sanitizer_syscall_post_impl_compat_16___sigreturn14(res,                   \
-                                                        (long long)(sigcntxp))
-#define __sanitizer_syscall_pre___getcwd(bufp, length)                         \
-  __sanitizer_syscall_pre_impl___getcwd((long long)(bufp), (long long)(length))
-#define __sanitizer_syscall_post___getcwd(res, bufp, length)                   \
-  __sanitizer_syscall_post_impl___getcwd(res, (long long)(bufp),               \
-                                         (long long)(length))
-#define __sanitizer_syscall_pre_fchroot(fd)                                    \
-  __sanitizer_syscall_pre_impl_fchroot((long long)(fd))
-#define __sanitizer_syscall_post_fchroot(res, fd)                              \
-  __sanitizer_syscall_post_impl_fchroot(res, (long long)(fd))
-#define __sanitizer_syscall_pre_compat_30_fhopen(fhp, flags)                   \
-  __sanitizer_syscall_pre_impl_compat_30_fhopen((long long)(fhp),              \
-                                                (long long)(flags))
-#define __sanitizer_syscall_post_compat_30_fhopen(res, fhp, flags)             \
-  __sanitizer_syscall_post_impl_compat_30_fhopen(res, (long long)(fhp),        \
-                                                 (long long)(flags))
-#define __sanitizer_syscall_pre_compat_30_fhstat(fhp, sb)                      \
-  __sanitizer_syscall_pre_impl_compat_30_fhstat((long long)(fhp),              \
-                                                (long long)(sb))
-#define __sanitizer_syscall_post_compat_30_fhstat(res, fhp, sb)                \
-  __sanitizer_syscall_post_impl_compat_30_fhstat(res, (long long)(fhp),        \
-                                                 (long long)(sb))
-#define __sanitizer_syscall_pre_compat_20_fhstatfs(fhp, buf)                   \
-  __sanitizer_syscall_pre_impl_compat_20_fhstatfs((long long)(fhp),            \
-                                                  (long long)(buf))
-#define __sanitizer_syscall_post_compat_20_fhstatfs(res, fhp, buf)             \
-  __sanitizer_syscall_post_impl_compat_20_fhstatfs(res, (long long)(fhp),      \
-                                                   (long long)(buf))
-#define __sanitizer_syscall_pre_compat_50_____semctl13(semid, semnum, cmd,     \
-                                                       arg)                    \
-  __sanitizer_syscall_pre_impl_compat_50_____semctl13(                         \
-      (long long)(semid), (long long)(semnum), (long long)(cmd),               \
-      (long long)(arg))
-#define __sanitizer_syscall_post_compat_50_____semctl13(res, semid, semnum,    \
-                                                        cmd, arg)              \
-  __sanitizer_syscall_post_impl_compat_50_____semctl13(                        \
-      res, (long long)(semid), (long long)(semnum), (long long)(cmd),          \
-      (long long)(arg))
-#define __sanitizer_syscall_pre_compat_50___msgctl13(msqid, cmd, buf)          \
-  __sanitizer_syscall_pre_impl_compat_50___msgctl13(                           \
-      (long long)(msqid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_post_compat_50___msgctl13(res, msqid, cmd, buf)    \
-  __sanitizer_syscall_post_impl_compat_50___msgctl13(                          \
-      res, (long long)(msqid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_pre_compat_50___shmctl13(shmid, cmd, buf)          \
-  __sanitizer_syscall_pre_impl_compat_50___shmctl13(                           \
-      (long long)(shmid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_post_compat_50___shmctl13(res, shmid, cmd, buf)    \
-  __sanitizer_syscall_post_impl_compat_50___shmctl13(                          \
-      res, (long long)(shmid), (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_pre_lchflags(path, flags)                          \
-  __sanitizer_syscall_pre_impl_lchflags((long long)(path), (long long)(flags))
-#define __sanitizer_syscall_post_lchflags(res, path, flags)                    \
-  __sanitizer_syscall_post_impl_lchflags(res, (long long)(path),               \
-                                         (long long)(flags))
-#define __sanitizer_syscall_pre_issetugid()                                    \
-  __sanitizer_syscall_pre_impl_issetugid()
-#define __sanitizer_syscall_post_issetugid(res)                                \
-  __sanitizer_syscall_post_impl_issetugid(res)
-#define __sanitizer_syscall_pre_utrace(label, addr, len)                       \
-  __sanitizer_syscall_pre_impl_utrace((long long)(label), (long long)(addr),   \
-                                      (long long)(len))
-#define __sanitizer_syscall_post_utrace(res, label, addr, len)                 \
-  __sanitizer_syscall_post_impl_utrace(res, (long long)(label),                \
-                                       (long long)(addr), (long long)(len))
-#define __sanitizer_syscall_pre_getcontext(ucp)                                \
-  __sanitizer_syscall_pre_impl_getcontext((long long)(ucp))
-#define __sanitizer_syscall_post_getcontext(res, ucp)                          \
-  __sanitizer_syscall_post_impl_getcontext(res, (long long)(ucp))
-#define __sanitizer_syscall_pre_setcontext(ucp)                                \
-  __sanitizer_syscall_pre_impl_setcontext((long long)(ucp))
-#define __sanitizer_syscall_post_setcontext(res, ucp)                          \
-  __sanitizer_syscall_post_impl_setcontext(res, (long long)(ucp))
-#define __sanitizer_syscall_pre__lwp_create(ucp, flags, new_lwp)               \
-  __sanitizer_syscall_pre_impl__lwp_create(                                    \
-      (long long)(ucp), (long long)(flags), (long long)(new_lwp))
-#define __sanitizer_syscall_post__lwp_create(res, ucp, flags, new_lwp)         \
-  __sanitizer_syscall_post_impl__lwp_create(                                   \
-      res, (long long)(ucp), (long long)(flags), (long long)(new_lwp))
-#define __sanitizer_syscall_pre__lwp_exit()                                    \
-  __sanitizer_syscall_pre_impl__lwp_exit()
-#define __sanitizer_syscall_post__lwp_exit(res)                                \
-  __sanitizer_syscall_post_impl__lwp_exit(res)
-#define __sanitizer_syscall_pre__lwp_self()                                    \
-  __sanitizer_syscall_pre_impl__lwp_self()
-#define __sanitizer_syscall_post__lwp_self(res)                                \
-  __sanitizer_syscall_post_impl__lwp_self(res)
-#define __sanitizer_syscall_pre__lwp_wait(wait_for, departed)                  \
-  __sanitizer_syscall_pre_impl__lwp_wait((long long)(wait_for),                \
-                                         (long long)(departed))
-#define __sanitizer_syscall_post__lwp_wait(res, wait_for, departed)            \
-  __sanitizer_syscall_post_impl__lwp_wait(res, (long long)(wait_for),          \
-                                          (long long)(departed))
-#define __sanitizer_syscall_pre__lwp_suspend(target)                           \
-  __sanitizer_syscall_pre_impl__lwp_suspend((long long)(target))
-#define __sanitizer_syscall_post__lwp_suspend(res, target)                     \
-  __sanitizer_syscall_post_impl__lwp_suspend(res, (long long)(target))
-#define __sanitizer_syscall_pre__lwp_continue(target)                          \
-  __sanitizer_syscall_pre_impl__lwp_continue((long long)(target))
-#define __sanitizer_syscall_post__lwp_continue(res, target)                    \
-  __sanitizer_syscall_post_impl__lwp_continue(res, (long long)(target))
-#define __sanitizer_syscall_pre__lwp_wakeup(target)                            \
-  __sanitizer_syscall_pre_impl__lwp_wakeup((long long)(target))
-#define __sanitizer_syscall_post__lwp_wakeup(res, target)                      \
-  __sanitizer_syscall_post_impl__lwp_wakeup(res, (long long)(target))
-#define __sanitizer_syscall_pre__lwp_getprivate()                              \
-  __sanitizer_syscall_pre_impl__lwp_getprivate()
-#define __sanitizer_syscall_post__lwp_getprivate(res)                          \
-  __sanitizer_syscall_post_impl__lwp_getprivate(res)
-#define __sanitizer_syscall_pre__lwp_setprivate(ptr)                           \
-  __sanitizer_syscall_pre_impl__lwp_setprivate((long long)(ptr))
-#define __sanitizer_syscall_post__lwp_setprivate(res, ptr)                     \
-  __sanitizer_syscall_post_impl__lwp_setprivate(res, (long long)(ptr))
-#define __sanitizer_syscall_pre__lwp_kill(target, signo)                       \
-  __sanitizer_syscall_pre_impl__lwp_kill((long long)(target),                  \
-                                         (long long)(signo))
-#define __sanitizer_syscall_post__lwp_kill(res, target, signo)                 \
-  __sanitizer_syscall_post_impl__lwp_kill(res, (long long)(target),            \
-                                          (long long)(signo))
-#define __sanitizer_syscall_pre__lwp_detach(target)                            \
-  __sanitizer_syscall_pre_impl__lwp_detach((long long)(target))
-#define __sanitizer_syscall_post__lwp_detach(res, target)                      \
-  __sanitizer_syscall_post_impl__lwp_detach(res, (long long)(target))
-#define __sanitizer_syscall_pre_compat_50__lwp_park(ts, unpark, hint,          \
-                                                    unparkhint)                \
-  __sanitizer_syscall_pre_impl_compat_50__lwp_park(                            \
-      (long long)(ts), (long long)(unpark), (long long)(hint),                 \
-      (long long)(unparkhint))
-#define __sanitizer_syscall_post_compat_50__lwp_park(res, ts, unpark, hint,    \
-                                                     unparkhint)               \
-  __sanitizer_syscall_post_impl_compat_50__lwp_park(                           \
-      res, (long long)(ts), (long long)(unpark), (long long)(hint),            \
-      (long long)(unparkhint))
-#define __sanitizer_syscall_pre__lwp_unpark(target, hint)                      \
-  __sanitizer_syscall_pre_impl__lwp_unpark((long long)(target),                \
-                                           (long long)(hint))
-#define __sanitizer_syscall_post__lwp_unpark(res, target, hint)                \
-  __sanitizer_syscall_post_impl__lwp_unpark(res, (long long)(target),          \
-                                            (long long)(hint))
-#define __sanitizer_syscall_pre__lwp_unpark_all(targets, ntargets, hint)       \
-  __sanitizer_syscall_pre_impl__lwp_unpark_all(                                \
-      (long long)(targets), (long long)(ntargets), (long long)(hint))
-#define __sanitizer_syscall_post__lwp_unpark_all(res, targets, ntargets, hint) \
-  __sanitizer_syscall_post_impl__lwp_unpark_all(                               \
-      res, (long long)(targets), (long long)(ntargets), (long long)(hint))
-#define __sanitizer_syscall_pre__lwp_setname(target, name)                     \
-  __sanitizer_syscall_pre_impl__lwp_setname((long long)(target),               \
-                                            (long long)(name))
-#define __sanitizer_syscall_post__lwp_setname(res, target, name)               \
-  __sanitizer_syscall_post_impl__lwp_setname(res, (long long)(target),         \
-                                             (long long)(name))
-#define __sanitizer_syscall_pre__lwp_getname(target, name, len)                \
-  __sanitizer_syscall_pre_impl__lwp_getname(                                   \
-      (long long)(target), (long long)(name), (long long)(len))
-#define __sanitizer_syscall_post__lwp_getname(res, target, name, len)          \
-  __sanitizer_syscall_post_impl__lwp_getname(                                  \
-      res, (long long)(target), (long long)(name), (long long)(len))
-#define __sanitizer_syscall_pre__lwp_ctl(features, address)                    \
-  __sanitizer_syscall_pre_impl__lwp_ctl((long long)(features),                 \
-                                        (long long)(address))
-#define __sanitizer_syscall_post__lwp_ctl(res, features, address)              \
-  __sanitizer_syscall_post_impl__lwp_ctl(res, (long long)(features),           \
-                                         (long long)(address))
-/* syscall 326 has been skipped */
-/* syscall 327 has been skipped */
-/* syscall 328 has been skipped */
-/* syscall 329 has been skipped */
-#define __sanitizer_syscall_pre_compat_60_sa_register(newv, oldv, flags,       \
-                                                      stackinfo_offset)        \
-  __sanitizer_syscall_pre_impl_compat_60_sa_register(                          \
-      (long long)(newv), (long long)(oldv), (long long)(flags),                \
-      (long long)(stackinfo_offset))
-#define __sanitizer_syscall_post_compat_60_sa_register(res, newv, oldv, flags, \
-                                                       stackinfo_offset)       \
-  __sanitizer_syscall_post_impl_compat_60_sa_register(                         \
-      res, (long long)(newv), (long long)(oldv), (long long)(flags),           \
-      (long long)(stackinfo_offset))
-#define __sanitizer_syscall_pre_compat_60_sa_stacks(num, stacks)               \
-  __sanitizer_syscall_pre_impl_compat_60_sa_stacks((long long)(num),           \
-                                                   (long long)(stacks))
-#define __sanitizer_syscall_post_compat_60_sa_stacks(res, num, stacks)         \
-  __sanitizer_syscall_post_impl_compat_60_sa_stacks(res, (long long)(num),     \
-                                                    (long long)(stacks))
-#define __sanitizer_syscall_pre_compat_60_sa_enable()                          \
-  __sanitizer_syscall_pre_impl_compat_60_sa_enable()
-#define __sanitizer_syscall_post_compat_60_sa_enable(res)                      \
-  __sanitizer_syscall_post_impl_compat_60_sa_enable(res)
-#define __sanitizer_syscall_pre_compat_60_sa_setconcurrency(concurrency)       \
-  __sanitizer_syscall_pre_impl_compat_60_sa_setconcurrency(                    \
-      (long long)(concurrency))
-#define __sanitizer_syscall_post_compat_60_sa_setconcurrency(res, concurrency) \
-  __sanitizer_syscall_post_impl_compat_60_sa_setconcurrency(                   \
-      res, (long long)(concurrency))
-#define __sanitizer_syscall_pre_compat_60_sa_yield()                           \
-  __sanitizer_syscall_pre_impl_compat_60_sa_yield()
-#define __sanitizer_syscall_post_compat_60_sa_yield(res)                       \
-  __sanitizer_syscall_post_impl_compat_60_sa_yield(res)
-#define __sanitizer_syscall_pre_compat_60_sa_preempt(sa_id)                    \
-  __sanitizer_syscall_pre_impl_compat_60_sa_preempt((long long)(sa_id))
-#define __sanitizer_syscall_post_compat_60_sa_preempt(res, sa_id)              \
-  __sanitizer_syscall_post_impl_compat_60_sa_preempt(res, (long long)(sa_id))
-/* syscall 336 has been skipped */
-/* syscall 337 has been skipped */
-/* syscall 338 has been skipped */
-/* syscall 339 has been skipped */
-#define __sanitizer_syscall_pre___sigaction_sigtramp(signum, nsa, osa, tramp,  \
-                                                     vers)                     \
-  __sanitizer_syscall_pre_impl___sigaction_sigtramp(                           \
-      (long long)(signum), (long long)(nsa), (long long)(osa),                 \
-      (long long)(tramp), (long long)(vers))
-#define __sanitizer_syscall_post___sigaction_sigtramp(res, signum, nsa, osa,   \
-                                                      tramp, vers)             \
-  __sanitizer_syscall_post_impl___sigaction_sigtramp(                          \
-      res, (long long)(signum), (long long)(nsa), (long long)(osa),            \
-      (long long)(tramp), (long long)(vers))
-/* syscall 341 has been skipped */
-/* syscall 342 has been skipped */
-#define __sanitizer_syscall_pre_rasctl(addr, len, op)                          \
-  __sanitizer_syscall_pre_impl_rasctl((long long)(addr), (long long)(len),     \
-                                      (long long)(op))
-#define __sanitizer_syscall_post_rasctl(res, addr, len, op)                    \
-  __sanitizer_syscall_post_impl_rasctl(res, (long long)(addr),                 \
-                                       (long long)(len), (long long)(op))
-#define __sanitizer_syscall_pre_kqueue() __sanitizer_syscall_pre_impl_kqueue()
-#define __sanitizer_syscall_post_kqueue(res)                                   \
-  __sanitizer_syscall_post_impl_kqueue(res)
-#define __sanitizer_syscall_pre_compat_50_kevent(fd, changelist, nchanges,     \
-                                                 eventlist, nevents, timeout)  \
-  __sanitizer_syscall_pre_impl_compat_50_kevent(                               \
-      (long long)(fd), (long long)(changelist), (long long)(nchanges),         \
-      (long long)(eventlist), (long long)(nevents), (long long)(timeout))
-#define __sanitizer_syscall_post_compat_50_kevent(                             \
-    res, fd, changelist, nchanges, eventlist, nevents, timeout)                \
-  __sanitizer_syscall_post_impl_compat_50_kevent(                              \
-      res, (long long)(fd), (long long)(changelist), (long long)(nchanges),    \
-      (long long)(eventlist), (long long)(nevents), (long long)(timeout))
-#define __sanitizer_syscall_pre__sched_setparam(pid, lid, policy, params)      \
-  __sanitizer_syscall_pre_impl__sched_setparam(                                \
-      (long long)(pid), (long long)(lid), (long long)(policy),                 \
-      (long long)(params))
-#define __sanitizer_syscall_post__sched_setparam(res, pid, lid, policy,        \
-                                                 params)                       \
-  __sanitizer_syscall_post_impl__sched_setparam(                               \
-      res, (long long)(pid), (long long)(lid), (long long)(policy),            \
-      (long long)(params))
-#define __sanitizer_syscall_pre__sched_getparam(pid, lid, policy, params)      \
-  __sanitizer_syscall_pre_impl__sched_getparam(                                \
-      (long long)(pid), (long long)(lid), (long long)(policy),                 \
-      (long long)(params))
-#define __sanitizer_syscall_post__sched_getparam(res, pid, lid, policy,        \
-                                                 params)                       \
-  __sanitizer_syscall_post_impl__sched_getparam(                               \
-      res, (long long)(pid), (long long)(lid), (long long)(policy),            \
-      (long long)(params))
-#define __sanitizer_syscall_pre__sched_setaffinity(pid, lid, size, cpuset)     \
-  __sanitizer_syscall_pre_impl__sched_setaffinity(                             \
-      (long long)(pid), (long long)(lid), (long long)(size),                   \
-      (long long)(cpuset))
-#define __sanitizer_syscall_post__sched_setaffinity(res, pid, lid, size,       \
-                                                    cpuset)                    \
-  __sanitizer_syscall_post_impl__sched_setaffinity(                            \
-      res, (long long)(pid), (long long)(lid), (long long)(size),              \
-      (long long)(cpuset))
-#define __sanitizer_syscall_pre__sched_getaffinity(pid, lid, size, cpuset)     \
-  __sanitizer_syscall_pre_impl__sched_getaffinity(                             \
-      (long long)(pid), (long long)(lid), (long long)(size),                   \
-      (long long)(cpuset))
-#define __sanitizer_syscall_post__sched_getaffinity(res, pid, lid, size,       \
-                                                    cpuset)                    \
-  __sanitizer_syscall_post_impl__sched_getaffinity(                            \
-      res, (long long)(pid), (long long)(lid), (long long)(size),              \
-      (long long)(cpuset))
-#define __sanitizer_syscall_pre_sched_yield()                                  \
-  __sanitizer_syscall_pre_impl_sched_yield()
-#define __sanitizer_syscall_post_sched_yield(res)                              \
-  __sanitizer_syscall_post_impl_sched_yield(res)
-#define __sanitizer_syscall_pre__sched_protect(priority)                       \
-  __sanitizer_syscall_pre_impl__sched_protect((long long)(priority))
-#define __sanitizer_syscall_post__sched_protect(res, priority)                 \
-  __sanitizer_syscall_post_impl__sched_protect(res, (long long)(priority))
-/* syscall 352 has been skipped */
-/* syscall 353 has been skipped */
-#define __sanitizer_syscall_pre_fsync_range(fd, flags, start, length)          \
-  __sanitizer_syscall_pre_impl_fsync_range(                                    \
-      (long long)(fd), (long long)(flags), (long long)(start),                 \
-      (long long)(length))
-#define __sanitizer_syscall_post_fsync_range(res, fd, flags, start, length)    \
-  __sanitizer_syscall_post_impl_fsync_range(                                   \
-      res, (long long)(fd), (long long)(flags), (long long)(start),            \
-      (long long)(length))
-#define __sanitizer_syscall_pre_uuidgen(store, count)                          \
-  __sanitizer_syscall_pre_impl_uuidgen((long long)(store), (long long)(count))
-#define __sanitizer_syscall_post_uuidgen(res, store, count)                    \
-  __sanitizer_syscall_post_impl_uuidgen(res, (long long)(store),               \
-                                        (long long)(count))
-#define __sanitizer_syscall_pre_compat_90_getvfsstat(buf, bufsize, flags)      \
-  __sanitizer_syscall_pre_impl_compat_90_getvfsstat(                           \
-      (long long)(buf), (long long)(bufsize), (long long)(flags))
-#define __sanitizer_syscall_post_compat_90_getvfsstat(res, buf, bufsize,       \
-                                                      flags)                   \
-  __sanitizer_syscall_post_impl_compat_90_getvfsstat(                          \
-      res, (long long)(buf), (long long)(bufsize), (long long)(flags))
-#define __sanitizer_syscall_pre_compat_90_statvfs1(path, buf, flags)           \
-  __sanitizer_syscall_pre_impl_compat_90_statvfs1(                             \
-      (long long)(path), (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_post_compat_90_statvfs1(res, path, buf, flags)     \
-  __sanitizer_syscall_post_impl_compat_90_statvfs1(                            \
-      res, (long long)(path), (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_pre_compat_90_fstatvfs1(fd, buf, flags)            \
-  __sanitizer_syscall_pre_impl_compat_90_fstatvfs1(                            \
-      (long long)(fd), (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_post_compat_90_fstatvfs1(res, fd, buf, flags)      \
-  __sanitizer_syscall_post_impl_compat_90_fstatvfs1(                           \
-      res, (long long)(fd), (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_pre_compat_30_fhstatvfs1(fhp, buf, flags)          \
-  __sanitizer_syscall_pre_impl_compat_30_fhstatvfs1(                           \
-      (long long)(fhp), (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_post_compat_30_fhstatvfs1(res, fhp, buf, flags)    \
-  __sanitizer_syscall_post_impl_compat_30_fhstatvfs1(                          \
-      res, (long long)(fhp), (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_pre_extattrctl(path, cmd, filename, attrnamespace, \
-                                           attrname)                           \
-  __sanitizer_syscall_pre_impl_extattrctl(                                     \
-      (long long)(path), (long long)(cmd), (long long)(filename),              \
-      (long long)(attrnamespace), (long long)(attrname))
-#define __sanitizer_syscall_post_extattrctl(res, path, cmd, filename,          \
-                                            attrnamespace, attrname)           \
-  __sanitizer_syscall_post_impl_extattrctl(                                    \
-      res, (long long)(path), (long long)(cmd), (long long)(filename),         \
-      (long long)(attrnamespace), (long long)(attrname))
-#define __sanitizer_syscall_pre_extattr_set_file(path, attrnamespace,          \
-                                                 attrname, data, nbytes)       \
-  __sanitizer_syscall_pre_impl_extattr_set_file(                               \
-      (long long)(path), (long long)(attrnamespace), (long long)(attrname),    \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_set_file(res, path, attrnamespace,    \
-                                                  attrname, data, nbytes)      \
-  __sanitizer_syscall_post_impl_extattr_set_file(                              \
-      res, (long long)(path), (long long)(attrnamespace),                      \
-      (long long)(attrname), (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_get_file(path, attrnamespace,          \
-                                                 attrname, data, nbytes)       \
-  __sanitizer_syscall_pre_impl_extattr_get_file(                               \
-      (long long)(path), (long long)(attrnamespace), (long long)(attrname),    \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_get_file(res, path, attrnamespace,    \
-                                                  attrname, data, nbytes)      \
-  __sanitizer_syscall_post_impl_extattr_get_file(                              \
-      res, (long long)(path), (long long)(attrnamespace),                      \
-      (long long)(attrname), (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_delete_file(path, attrnamespace,       \
-                                                    attrname)                  \
-  __sanitizer_syscall_pre_impl_extattr_delete_file(                            \
-      (long long)(path), (long long)(attrnamespace), (long long)(attrname))
-#define __sanitizer_syscall_post_extattr_delete_file(res, path, attrnamespace, \
-                                                     attrname)                 \
-  __sanitizer_syscall_post_impl_extattr_delete_file(                           \
-      res, (long long)(path), (long long)(attrnamespace),                      \
-      (long long)(attrname))
-#define __sanitizer_syscall_pre_extattr_set_fd(fd, attrnamespace, attrname,    \
-                                               data, nbytes)                   \
-  __sanitizer_syscall_pre_impl_extattr_set_fd(                                 \
-      (long long)(fd), (long long)(attrnamespace), (long long)(attrname),      \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_set_fd(res, fd, attrnamespace,        \
-                                                attrname, data, nbytes)        \
-  __sanitizer_syscall_post_impl_extattr_set_fd(                                \
-      res, (long long)(fd), (long long)(attrnamespace), (long long)(attrname), \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_get_fd(fd, attrnamespace, attrname,    \
-                                               data, nbytes)                   \
-  __sanitizer_syscall_pre_impl_extattr_get_fd(                                 \
-      (long long)(fd), (long long)(attrnamespace), (long long)(attrname),      \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_get_fd(res, fd, attrnamespace,        \
-                                                attrname, data, nbytes)        \
-  __sanitizer_syscall_post_impl_extattr_get_fd(                                \
-      res, (long long)(fd), (long long)(attrnamespace), (long long)(attrname), \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_delete_fd(fd, attrnamespace, attrname) \
-  __sanitizer_syscall_pre_impl_extattr_delete_fd(                              \
-      (long long)(fd), (long long)(attrnamespace), (long long)(attrname))
-#define __sanitizer_syscall_post_extattr_delete_fd(res, fd, attrnamespace,     \
-                                                   attrname)                   \
-  __sanitizer_syscall_post_impl_extattr_delete_fd(                             \
-      res, (long long)(fd), (long long)(attrnamespace), (long long)(attrname))
-#define __sanitizer_syscall_pre_extattr_set_link(path, attrnamespace,          \
-                                                 attrname, data, nbytes)       \
-  __sanitizer_syscall_pre_impl_extattr_set_link(                               \
-      (long long)(path), (long long)(attrnamespace), (long long)(attrname),    \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_set_link(res, path, attrnamespace,    \
-                                                  attrname, data, nbytes)      \
-  __sanitizer_syscall_post_impl_extattr_set_link(                              \
-      res, (long long)(path), (long long)(attrnamespace),                      \
-      (long long)(attrname), (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_get_link(path, attrnamespace,          \
-                                                 attrname, data, nbytes)       \
-  __sanitizer_syscall_pre_impl_extattr_get_link(                               \
-      (long long)(path), (long long)(attrnamespace), (long long)(attrname),    \
-      (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_get_link(res, path, attrnamespace,    \
-                                                  attrname, data, nbytes)      \
-  __sanitizer_syscall_post_impl_extattr_get_link(                              \
-      res, (long long)(path), (long long)(attrnamespace),                      \
-      (long long)(attrname), (long long)(data), (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_delete_link(path, attrnamespace,       \
-                                                    attrname)                  \
-  __sanitizer_syscall_pre_impl_extattr_delete_link(                            \
-      (long long)(path), (long long)(attrnamespace), (long long)(attrname))
-#define __sanitizer_syscall_post_extattr_delete_link(res, path, attrnamespace, \
-                                                     attrname)                 \
-  __sanitizer_syscall_post_impl_extattr_delete_link(                           \
-      res, (long long)(path), (long long)(attrnamespace),                      \
-      (long long)(attrname))
-#define __sanitizer_syscall_pre_extattr_list_fd(fd, attrnamespace, data,       \
-                                                nbytes)                        \
-  __sanitizer_syscall_pre_impl_extattr_list_fd(                                \
-      (long long)(fd), (long long)(attrnamespace), (long long)(data),          \
-      (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_list_fd(res, fd, attrnamespace, data, \
-                                                 nbytes)                       \
-  __sanitizer_syscall_post_impl_extattr_list_fd(                               \
-      res, (long long)(fd), (long long)(attrnamespace), (long long)(data),     \
-      (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_list_file(path, attrnamespace, data,   \
-                                                  nbytes)                      \
-  __sanitizer_syscall_pre_impl_extattr_list_file(                              \
-      (long long)(path), (long long)(attrnamespace), (long long)(data),        \
-      (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_list_file(res, path, attrnamespace,   \
-                                                   data, nbytes)               \
-  __sanitizer_syscall_post_impl_extattr_list_file(                             \
-      res, (long long)(path), (long long)(attrnamespace), (long long)(data),   \
-      (long long)(nbytes))
-#define __sanitizer_syscall_pre_extattr_list_link(path, attrnamespace, data,   \
-                                                  nbytes)                      \
-  __sanitizer_syscall_pre_impl_extattr_list_link(                              \
-      (long long)(path), (long long)(attrnamespace), (long long)(data),        \
-      (long long)(nbytes))
-#define __sanitizer_syscall_post_extattr_list_link(res, path, attrnamespace,   \
-                                                   data, nbytes)               \
-  __sanitizer_syscall_post_impl_extattr_list_link(                             \
-      res, (long long)(path), (long long)(attrnamespace), (long long)(data),   \
-      (long long)(nbytes))
-#define __sanitizer_syscall_pre_compat_50_pselect(nd, in, ou, ex, ts, mask)    \
-  __sanitizer_syscall_pre_impl_compat_50_pselect(                              \
-      (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex),      \
-      (long long)(ts), (long long)(mask))
-#define __sanitizer_syscall_post_compat_50_pselect(res, nd, in, ou, ex, ts,    \
-                                                   mask)                       \
-  __sanitizer_syscall_post_impl_compat_50_pselect(                             \
-      res, (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \
-      (long long)(ts), (long long)(mask))
-#define __sanitizer_syscall_pre_compat_50_pollts(fds, nfds, ts, mask)          \
-  __sanitizer_syscall_pre_impl_compat_50_pollts(                               \
-      (long long)(fds), (long long)(nfds), (long long)(ts), (long long)(mask))
-#define __sanitizer_syscall_post_compat_50_pollts(res, fds, nfds, ts, mask)    \
-  __sanitizer_syscall_post_impl_compat_50_pollts(                              \
-      res, (long long)(fds), (long long)(nfds), (long long)(ts),               \
-      (long long)(mask))
-#define __sanitizer_syscall_pre_setxattr(path, name, value, size, flags)       \
-  __sanitizer_syscall_pre_impl_setxattr((long long)(path), (long long)(name),  \
-                                        (long long)(value), (long long)(size), \
-                                        (long long)(flags))
-#define __sanitizer_syscall_post_setxattr(res, path, name, value, size, flags) \
-  __sanitizer_syscall_post_impl_setxattr(                                      \
-      res, (long long)(path), (long long)(name), (long long)(value),           \
-      (long long)(size), (long long)(flags))
-#define __sanitizer_syscall_pre_lsetxattr(path, name, value, size, flags)      \
-  __sanitizer_syscall_pre_impl_lsetxattr(                                      \
-      (long long)(path), (long long)(name), (long long)(value),                \
-      (long long)(size), (long long)(flags))
-#define __sanitizer_syscall_post_lsetxattr(res, path, name, value, size,       \
-                                           flags)                              \
-  __sanitizer_syscall_post_impl_lsetxattr(                                     \
-      res, (long long)(path), (long long)(name), (long long)(value),           \
-      (long long)(size), (long long)(flags))
-#define __sanitizer_syscall_pre_fsetxattr(fd, name, value, size, flags)        \
-  __sanitizer_syscall_pre_impl_fsetxattr(                                      \
-      (long long)(fd), (long long)(name), (long long)(value),                  \
-      (long long)(size), (long long)(flags))
-#define __sanitizer_syscall_post_fsetxattr(res, fd, name, value, size, flags)  \
-  __sanitizer_syscall_post_impl_fsetxattr(                                     \
-      res, (long long)(fd), (long long)(name), (long long)(value),             \
-      (long long)(size), (long long)(flags))
-#define __sanitizer_syscall_pre_getxattr(path, name, value, size)              \
-  __sanitizer_syscall_pre_impl_getxattr((long long)(path), (long long)(name),  \
-                                        (long long)(value), (long long)(size))
-#define __sanitizer_syscall_post_getxattr(res, path, name, value, size)        \
-  __sanitizer_syscall_post_impl_getxattr(                                      \
-      res, (long long)(path), (long long)(name), (long long)(value),           \
-      (long long)(size))
-#define __sanitizer_syscall_pre_lgetxattr(path, name, value, size)             \
-  __sanitizer_syscall_pre_impl_lgetxattr((long long)(path), (long long)(name), \
-                                         (long long)(value),                   \
-                                         (long long)(size))
-#define __sanitizer_syscall_post_lgetxattr(res, path, name, value, size)       \
-  __sanitizer_syscall_post_impl_lgetxattr(                                     \
-      res, (long long)(path), (long long)(name), (long long)(value),           \
-      (long long)(size))
-#define __sanitizer_syscall_pre_fgetxattr(fd, name, value, size)               \
-  __sanitizer_syscall_pre_impl_fgetxattr((long long)(fd), (long long)(name),   \
-                                         (long long)(value),                   \
-                                         (long long)(size))
-#define __sanitizer_syscall_post_fgetxattr(res, fd, name, value, size)         \
-  __sanitizer_syscall_post_impl_fgetxattr(                                     \
-      res, (long long)(fd), (long long)(name), (long long)(value),             \
-      (long long)(size))
-#define __sanitizer_syscall_pre_listxattr(path, list, size)                    \
-  __sanitizer_syscall_pre_impl_listxattr((long long)(path), (long long)(list), \
-                                         (long long)(size))
-#define __sanitizer_syscall_post_listxattr(res, path, list, size)              \
-  __sanitizer_syscall_post_impl_listxattr(                                     \
-      res, (long long)(path), (long long)(list), (long long)(size))
-#define __sanitizer_syscall_pre_llistxattr(path, list, size)                   \
-  __sanitizer_syscall_pre_impl_llistxattr(                                     \
-      (long long)(path), (long long)(list), (long long)(size))
-#define __sanitizer_syscall_post_llistxattr(res, path, list, size)             \
-  __sanitizer_syscall_post_impl_llistxattr(                                    \
-      res, (long long)(path), (long long)(list), (long long)(size))
-#define __sanitizer_syscall_pre_flistxattr(fd, list, size)                     \
-  __sanitizer_syscall_pre_impl_flistxattr((long long)(fd), (long long)(list),  \
-                                          (long long)(size))
-#define __sanitizer_syscall_post_flistxattr(res, fd, list, size)               \
-  __sanitizer_syscall_post_impl_flistxattr(                                    \
-      res, (long long)(fd), (long long)(list), (long long)(size))
-#define __sanitizer_syscall_pre_removexattr(path, name)                        \
-  __sanitizer_syscall_pre_impl_removexattr((long long)(path), (long long)(name))
-#define __sanitizer_syscall_post_removexattr(res, path, name)                  \
-  __sanitizer_syscall_post_impl_removexattr(res, (long long)(path),            \
-                                            (long long)(name))
-#define __sanitizer_syscall_pre_lremovexattr(path, name)                       \
-  __sanitizer_syscall_pre_impl_lremovexattr((long long)(path),                 \
-                                            (long long)(name))
-#define __sanitizer_syscall_post_lremovexattr(res, path, name)                 \
-  __sanitizer_syscall_post_impl_lremovexattr(res, (long long)(path),           \
-                                             (long long)(name))
-#define __sanitizer_syscall_pre_fremovexattr(fd, name)                         \
-  __sanitizer_syscall_pre_impl_fremovexattr((long long)(fd), (long long)(name))
-#define __sanitizer_syscall_post_fremovexattr(res, fd, name)                   \
-  __sanitizer_syscall_post_impl_fremovexattr(res, (long long)(fd),             \
-                                             (long long)(name))
-#define __sanitizer_syscall_pre_compat_50___stat30(path, ub)                   \
-  __sanitizer_syscall_pre_impl_compat_50___stat30((long long)(path),           \
-                                                  (long long)(ub))
-#define __sanitizer_syscall_post_compat_50___stat30(res, path, ub)             \
-  __sanitizer_syscall_post_impl_compat_50___stat30(res, (long long)(path),     \
-                                                   (long long)(ub))
-#define __sanitizer_syscall_pre_compat_50___fstat30(fd, sb)                    \
-  __sanitizer_syscall_pre_impl_compat_50___fstat30((long long)(fd),            \
-                                                   (long long)(sb))
-#define __sanitizer_syscall_post_compat_50___fstat30(res, fd, sb)              \
-  __sanitizer_syscall_post_impl_compat_50___fstat30(res, (long long)(fd),      \
-                                                    (long long)(sb))
-#define __sanitizer_syscall_pre_compat_50___lstat30(path, ub)                  \
-  __sanitizer_syscall_pre_impl_compat_50___lstat30((long long)(path),          \
-                                                   (long long)(ub))
-#define __sanitizer_syscall_post_compat_50___lstat30(res, path, ub)            \
-  __sanitizer_syscall_post_impl_compat_50___lstat30(res, (long long)(path),    \
-                                                    (long long)(ub))
-#define __sanitizer_syscall_pre___getdents30(fd, buf, count)                   \
-  __sanitizer_syscall_pre_impl___getdents30((long long)(fd), (long long)(buf), \
-                                            (long long)(count))
-#define __sanitizer_syscall_post___getdents30(res, fd, buf, count)             \
-  __sanitizer_syscall_post_impl___getdents30(                                  \
-      res, (long long)(fd), (long long)(buf), (long long)(count))
-#define __sanitizer_syscall_pre_posix_fadvise()                                \
-  __sanitizer_syscall_pre_impl_posix_fadvise((long long)())
-#define __sanitizer_syscall_post_posix_fadvise(res)                            \
-  __sanitizer_syscall_post_impl_posix_fadvise(res, (long long)())
-#define __sanitizer_syscall_pre_compat_30___fhstat30(fhp, sb)                  \
-  __sanitizer_syscall_pre_impl_compat_30___fhstat30((long long)(fhp),          \
-                                                    (long long)(sb))
-#define __sanitizer_syscall_post_compat_30___fhstat30(res, fhp, sb)            \
-  __sanitizer_syscall_post_impl_compat_30___fhstat30(res, (long long)(fhp),    \
-                                                     (long long)(sb))
-#define __sanitizer_syscall_pre_compat_50___ntp_gettime30(ntvp)                \
-  __sanitizer_syscall_pre_impl_compat_50___ntp_gettime30((long long)(ntvp))
-#define __sanitizer_syscall_post_compat_50___ntp_gettime30(res, ntvp)          \
-  __sanitizer_syscall_post_impl_compat_50___ntp_gettime30(res,                 \
-                                                          (long long)(ntvp))
-#define __sanitizer_syscall_pre___socket30(domain, type, protocol)             \
-  __sanitizer_syscall_pre_impl___socket30(                                     \
-      (long long)(domain), (long long)(type), (long long)(protocol))
-#define __sanitizer_syscall_post___socket30(res, domain, type, protocol)       \
-  __sanitizer_syscall_post_impl___socket30(                                    \
-      res, (long long)(domain), (long long)(type), (long long)(protocol))
-#define __sanitizer_syscall_pre___getfh30(fname, fhp, fh_size)                 \
-  __sanitizer_syscall_pre_impl___getfh30((long long)(fname), (long long)(fhp), \
-                                         (long long)(fh_size))
-#define __sanitizer_syscall_post___getfh30(res, fname, fhp, fh_size)           \
-  __sanitizer_syscall_post_impl___getfh30(                                     \
-      res, (long long)(fname), (long long)(fhp), (long long)(fh_size))
-#define __sanitizer_syscall_pre___fhopen40(fhp, fh_size, flags)                \
-  __sanitizer_syscall_pre_impl___fhopen40(                                     \
-      (long long)(fhp), (long long)(fh_size), (long long)(flags))
-#define __sanitizer_syscall_post___fhopen40(res, fhp, fh_size, flags)          \
-  __sanitizer_syscall_post_impl___fhopen40(                                    \
-      res, (long long)(fhp), (long long)(fh_size), (long long)(flags))
-#define __sanitizer_syscall_pre_compat_90_fhstatvfs1(fhp, fh_size, buf, flags) \
-  __sanitizer_syscall_pre_impl_compat_90_fhstatvfs1(                           \
-      (long long)(fhp), (long long)(fh_size), (long long)(buf),                \
-      (long long)(flags))
-#define __sanitizer_syscall_post_compat_90_fhstatvfs1(res, fhp, fh_size, buf,  \
-                                                      flags)                   \
-  __sanitizer_syscall_post_impl_compat_90_fhstatvfs1(                          \
-      res, (long long)(fhp), (long long)(fh_size), (long long)(buf),           \
-      (long long)(flags))
-#define __sanitizer_syscall_pre_compat_50___fhstat40(fhp, fh_size, sb)         \
-  __sanitizer_syscall_pre_impl_compat_50___fhstat40(                           \
-      (long long)(fhp), (long long)(fh_size), (long long)(sb))
-#define __sanitizer_syscall_post_compat_50___fhstat40(res, fhp, fh_size, sb)   \
-  __sanitizer_syscall_post_impl_compat_50___fhstat40(                          \
-      res, (long long)(fhp), (long long)(fh_size), (long long)(sb))
-#define __sanitizer_syscall_pre_aio_cancel(fildes, aiocbp)                     \
-  __sanitizer_syscall_pre_impl_aio_cancel((long long)(fildes),                 \
-                                          (long long)(aiocbp))
-#define __sanitizer_syscall_post_aio_cancel(res, fildes, aiocbp)               \
-  __sanitizer_syscall_post_impl_aio_cancel(res, (long long)(fildes),           \
-                                           (long long)(aiocbp))
-#define __sanitizer_syscall_pre_aio_error(aiocbp)                              \
-  __sanitizer_syscall_pre_impl_aio_error((long long)(aiocbp))
-#define __sanitizer_syscall_post_aio_error(res, aiocbp)                        \
-  __sanitizer_syscall_post_impl_aio_error(res, (long long)(aiocbp))
-#define __sanitizer_syscall_pre_aio_fsync(op, aiocbp)                          \
-  __sanitizer_syscall_pre_impl_aio_fsync((long long)(op), (long long)(aiocbp))
-#define __sanitizer_syscall_post_aio_fsync(res, op, aiocbp)                    \
-  __sanitizer_syscall_post_impl_aio_fsync(res, (long long)(op),                \
-                                          (long long)(aiocbp))
-#define __sanitizer_syscall_pre_aio_read(aiocbp)                               \
-  __sanitizer_syscall_pre_impl_aio_read((long long)(aiocbp))
-#define __sanitizer_syscall_post_aio_read(res, aiocbp)                         \
-  __sanitizer_syscall_post_impl_aio_read(res, (long long)(aiocbp))
-#define __sanitizer_syscall_pre_aio_return(aiocbp)                             \
-  __sanitizer_syscall_pre_impl_aio_return((long long)(aiocbp))
-#define __sanitizer_syscall_post_aio_return(res, aiocbp)                       \
-  __sanitizer_syscall_post_impl_aio_return(res, (long long)(aiocbp))
-#define __sanitizer_syscall_pre_compat_50_aio_suspend(list, nent, timeout)     \
-  __sanitizer_syscall_pre_impl_compat_50_aio_suspend(                          \
-      (long long)(list), (long long)(nent), (long long)(timeout))
-#define __sanitizer_syscall_post_compat_50_aio_suspend(res, list, nent,        \
-                                                       timeout)                \
-  __sanitizer_syscall_post_impl_compat_50_aio_suspend(                         \
-      res, (long long)(list), (long long)(nent), (long long)(timeout))
-#define __sanitizer_syscall_pre_aio_write(aiocbp)                              \
-  __sanitizer_syscall_pre_impl_aio_write((long long)(aiocbp))
-#define __sanitizer_syscall_post_aio_write(res, aiocbp)                        \
-  __sanitizer_syscall_post_impl_aio_write(res, (long long)(aiocbp))
-#define __sanitizer_syscall_pre_lio_listio(mode, list, nent, sig)              \
-  __sanitizer_syscall_pre_impl_lio_listio((long long)(mode),                   \
-                                          (long long)(list),                   \
-                                          (long long)(nent), (long long)(sig))
-#define __sanitizer_syscall_post_lio_listio(res, mode, list, nent, sig)        \
-  __sanitizer_syscall_post_impl_lio_listio(                                    \
-      res, (long long)(mode), (long long)(list), (long long)(nent),            \
-      (long long)(sig))
-/* syscall 407 has been skipped */
-/* syscall 408 has been skipped */
-/* syscall 409 has been skipped */
-#define __sanitizer_syscall_pre___mount50(type, path, flags, data, data_len)   \
-  __sanitizer_syscall_pre_impl___mount50(                                      \
-      (long long)(type), (long long)(path), (long long)(flags),                \
-      (long long)(data), (long long)(data_len))
-#define __sanitizer_syscall_post___mount50(res, type, path, flags, data,       \
-                                           data_len)                           \
-  __sanitizer_syscall_post_impl___mount50(                                     \
-      res, (long long)(type), (long long)(path), (long long)(flags),           \
-      (long long)(data), (long long)(data_len))
-#define __sanitizer_syscall_pre_mremap(old_address, old_size, new_address,     \
-                                       new_size, flags)                        \
-  __sanitizer_syscall_pre_impl_mremap(                                         \
-      (long long)(old_address), (long long)(old_size),                         \
-      (long long)(new_address), (long long)(new_size), (long long)(flags))
-#define __sanitizer_syscall_post_mremap(res, old_address, old_size,            \
-                                        new_address, new_size, flags)          \
-  __sanitizer_syscall_post_impl_mremap(                                        \
-      res, (long long)(old_address), (long long)(old_size),                    \
-      (long long)(new_address), (long long)(new_size), (long long)(flags))
-#define __sanitizer_syscall_pre_pset_create(psid)                              \
-  __sanitizer_syscall_pre_impl_pset_create((long long)(psid))
-#define __sanitizer_syscall_post_pset_create(res, psid)                        \
-  __sanitizer_syscall_post_impl_pset_create(res, (long long)(psid))
-#define __sanitizer_syscall_pre_pset_destroy(psid)                             \
-  __sanitizer_syscall_pre_impl_pset_destroy((long long)(psid))
-#define __sanitizer_syscall_post_pset_destroy(res, psid)                       \
-  __sanitizer_syscall_post_impl_pset_destroy(res, (long long)(psid))
-#define __sanitizer_syscall_pre_pset_assign(psid, cpuid, opsid)                \
-  __sanitizer_syscall_pre_impl_pset_assign(                                    \
-      (long long)(psid), (long long)(cpuid), (long long)(opsid))
-#define __sanitizer_syscall_post_pset_assign(res, psid, cpuid, opsid)          \
-  __sanitizer_syscall_post_impl_pset_assign(                                   \
-      res, (long long)(psid), (long long)(cpuid), (long long)(opsid))
-#define __sanitizer_syscall_pre__pset_bind(idtype, first_id, second_id, psid,  \
-                                           opsid)                              \
-  __sanitizer_syscall_pre_impl__pset_bind(                                     \
-      (long long)(idtype), (long long)(first_id), (long long)(second_id),      \
-      (long long)(psid), (long long)(opsid))
-#define __sanitizer_syscall_post__pset_bind(res, idtype, first_id, second_id,  \
-                                            psid, opsid)                       \
-  __sanitizer_syscall_post_impl__pset_bind(                                    \
-      res, (long long)(idtype), (long long)(first_id), (long long)(second_id), \
-      (long long)(psid), (long long)(opsid))
-#define __sanitizer_syscall_pre___posix_fadvise50(fd, PAD, offset, len,        \
-                                                  advice)                      \
-  __sanitizer_syscall_pre_impl___posix_fadvise50(                              \
-      (long long)(fd), (long long)(PAD), (long long)(offset),                  \
-      (long long)(len), (long long)(advice))
-#define __sanitizer_syscall_post___posix_fadvise50(res, fd, PAD, offset, len,  \
-                                                   advice)                     \
-  __sanitizer_syscall_post_impl___posix_fadvise50(                             \
-      res, (long long)(fd), (long long)(PAD), (long long)(offset),             \
-      (long long)(len), (long long)(advice))
-#define __sanitizer_syscall_pre___select50(nd, in, ou, ex, tv)                 \
-  __sanitizer_syscall_pre_impl___select50((long long)(nd), (long long)(in),    \
-                                          (long long)(ou), (long long)(ex),    \
-                                          (long long)(tv))
-#define __sanitizer_syscall_post___select50(res, nd, in, ou, ex, tv)           \
-  __sanitizer_syscall_post_impl___select50(res, (long long)(nd),               \
-                                           (long long)(in), (long long)(ou),   \
-                                           (long long)(ex), (long long)(tv))
-#define __sanitizer_syscall_pre___gettimeofday50(tp, tzp)                      \
-  __sanitizer_syscall_pre_impl___gettimeofday50((long long)(tp),               \
-                                                (long long)(tzp))
-#define __sanitizer_syscall_post___gettimeofday50(res, tp, tzp)                \
-  __sanitizer_syscall_post_impl___gettimeofday50(res, (long long)(tp),         \
-                                                 (long long)(tzp))
-#define __sanitizer_syscall_pre___settimeofday50(tv, tzp)                      \
-  __sanitizer_syscall_pre_impl___settimeofday50((long long)(tv),               \
-                                                (long long)(tzp))
-#define __sanitizer_syscall_post___settimeofday50(res, tv, tzp)                \
-  __sanitizer_syscall_post_impl___settimeofday50(res, (long long)(tv),         \
-                                                 (long long)(tzp))
-#define __sanitizer_syscall_pre___utimes50(path, tptr)                         \
-  __sanitizer_syscall_pre_impl___utimes50((long long)(path), (long long)(tptr))
-#define __sanitizer_syscall_post___utimes50(res, path, tptr)                   \
-  __sanitizer_syscall_post_impl___utimes50(res, (long long)(path),             \
-                                           (long long)(tptr))
-#define __sanitizer_syscall_pre___adjtime50(delta, olddelta)                   \
-  __sanitizer_syscall_pre_impl___adjtime50((long long)(delta),                 \
-                                           (long long)(olddelta))
-#define __sanitizer_syscall_post___adjtime50(res, delta, olddelta)             \
-  __sanitizer_syscall_post_impl___adjtime50(res, (long long)(delta),           \
-                                            (long long)(olddelta))
-#define __sanitizer_syscall_pre___lfs_segwait50(fsidp, tv)                     \
-  __sanitizer_syscall_pre_impl___lfs_segwait50((long long)(fsidp),             \
-                                               (long long)(tv))
-#define __sanitizer_syscall_post___lfs_segwait50(res, fsidp, tv)               \
-  __sanitizer_syscall_post_impl___lfs_segwait50(res, (long long)(fsidp),       \
-                                                (long long)(tv))
-#define __sanitizer_syscall_pre___futimes50(fd, tptr)                          \
-  __sanitizer_syscall_pre_impl___futimes50((long long)(fd), (long long)(tptr))
-#define __sanitizer_syscall_post___futimes50(res, fd, tptr)                    \
-  __sanitizer_syscall_post_impl___futimes50(res, (long long)(fd),              \
-                                            (long long)(tptr))
-#define __sanitizer_syscall_pre___lutimes50(path, tptr)                        \
-  __sanitizer_syscall_pre_impl___lutimes50((long long)(path), (long long)(tptr))
-#define __sanitizer_syscall_post___lutimes50(res, path, tptr)                  \
-  __sanitizer_syscall_post_impl___lutimes50(res, (long long)(path),            \
-                                            (long long)(tptr))
-#define __sanitizer_syscall_pre___setitimer50(which, itv, oitv)                \
-  __sanitizer_syscall_pre_impl___setitimer50(                                  \
-      (long long)(which), (long long)(itv), (long long)(oitv))
-#define __sanitizer_syscall_post___setitimer50(res, which, itv, oitv)          \
-  __sanitizer_syscall_post_impl___setitimer50(                                 \
-      res, (long long)(which), (long long)(itv), (long long)(oitv))
-#define __sanitizer_syscall_pre___getitimer50(which, itv)                      \
-  __sanitizer_syscall_pre_impl___getitimer50((long long)(which),               \
-                                             (long long)(itv))
-#define __sanitizer_syscall_post___getitimer50(res, which, itv)                \
-  __sanitizer_syscall_post_impl___getitimer50(res, (long long)(which),         \
-                                              (long long)(itv))
-#define __sanitizer_syscall_pre___clock_gettime50(clock_id, tp)                \
-  __sanitizer_syscall_pre_impl___clock_gettime50((long long)(clock_id),        \
-                                                 (long long)(tp))
-#define __sanitizer_syscall_post___clock_gettime50(res, clock_id, tp)          \
-  __sanitizer_syscall_post_impl___clock_gettime50(res, (long long)(clock_id),  \
-                                                  (long long)(tp))
-#define __sanitizer_syscall_pre___clock_settime50(clock_id, tp)                \
-  __sanitizer_syscall_pre_impl___clock_settime50((long long)(clock_id),        \
-                                                 (long long)(tp))
-#define __sanitizer_syscall_post___clock_settime50(res, clock_id, tp)          \
-  __sanitizer_syscall_post_impl___clock_settime50(res, (long long)(clock_id),  \
-                                                  (long long)(tp))
-#define __sanitizer_syscall_pre___clock_getres50(clock_id, tp)                 \
-  __sanitizer_syscall_pre_impl___clock_getres50((long long)(clock_id),         \
-                                                (long long)(tp))
-#define __sanitizer_syscall_post___clock_getres50(res, clock_id, tp)           \
-  __sanitizer_syscall_post_impl___clock_getres50(res, (long long)(clock_id),   \
-                                                 (long long)(tp))
-#define __sanitizer_syscall_pre___nanosleep50(rqtp, rmtp)                      \
-  __sanitizer_syscall_pre_impl___nanosleep50((long long)(rqtp),                \
-                                             (long long)(rmtp))
-#define __sanitizer_syscall_post___nanosleep50(res, rqtp, rmtp)                \
-  __sanitizer_syscall_post_impl___nanosleep50(res, (long long)(rqtp),          \
-                                              (long long)(rmtp))
-#define __sanitizer_syscall_pre_____sigtimedwait50(set, info, timeout)         \
-  __sanitizer_syscall_pre_impl_____sigtimedwait50(                             \
-      (long long)(set), (long long)(info), (long long)(timeout))
-#define __sanitizer_syscall_post_____sigtimedwait50(res, set, info, timeout)   \
-  __sanitizer_syscall_post_impl_____sigtimedwait50(                            \
-      res, (long long)(set), (long long)(info), (long long)(timeout))
-#define __sanitizer_syscall_pre___mq_timedsend50(mqdes, msg_ptr, msg_len,      \
-                                                 msg_prio, abs_timeout)        \
-  __sanitizer_syscall_pre_impl___mq_timedsend50(                               \
-      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),          \
-      (long long)(msg_prio), (long long)(abs_timeout))
-#define __sanitizer_syscall_post___mq_timedsend50(                             \
-    res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout)                       \
-  __sanitizer_syscall_post_impl___mq_timedsend50(                              \
-      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),     \
-      (long long)(msg_prio), (long long)(abs_timeout))
-#define __sanitizer_syscall_pre___mq_timedreceive50(mqdes, msg_ptr, msg_len,   \
-                                                    msg_prio, abs_timeout)     \
-  __sanitizer_syscall_pre_impl___mq_timedreceive50(                            \
-      (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),          \
-      (long long)(msg_prio), (long long)(abs_timeout))
-#define __sanitizer_syscall_post___mq_timedreceive50(                          \
-    res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout)                       \
-  __sanitizer_syscall_post_impl___mq_timedreceive50(                           \
-      res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len),     \
-      (long long)(msg_prio), (long long)(abs_timeout))
-#define __sanitizer_syscall_pre_compat_60__lwp_park(ts, unpark, hint,          \
-                                                    unparkhint)                \
-  __sanitizer_syscall_pre_impl_compat_60__lwp_park(                            \
-      (long long)(ts), (long long)(unpark), (long long)(hint),                 \
-      (long long)(unparkhint))
-#define __sanitizer_syscall_post_compat_60__lwp_park(res, ts, unpark, hint,    \
-                                                     unparkhint)               \
-  __sanitizer_syscall_post_impl_compat_60__lwp_park(                           \
-      res, (long long)(ts), (long long)(unpark), (long long)(hint),            \
-      (long long)(unparkhint))
-#define __sanitizer_syscall_pre___kevent50(fd, changelist, nchanges,           \
-                                           eventlist, nevents, timeout)        \
-  __sanitizer_syscall_pre_impl___kevent50(                                     \
-      (long long)(fd), (long long)(changelist), (long long)(nchanges),         \
-      (long long)(eventlist), (long long)(nevents), (long long)(timeout))
-#define __sanitizer_syscall_post___kevent50(res, fd, changelist, nchanges,     \
-                                            eventlist, nevents, timeout)       \
-  __sanitizer_syscall_post_impl___kevent50(                                    \
-      res, (long long)(fd), (long long)(changelist), (long long)(nchanges),    \
-      (long long)(eventlist), (long long)(nevents), (long long)(timeout))
-#define __sanitizer_syscall_pre___pselect50(nd, in, ou, ex, ts, mask)          \
-  __sanitizer_syscall_pre_impl___pselect50((long long)(nd), (long long)(in),   \
-                                           (long long)(ou), (long long)(ex),   \
-                                           (long long)(ts), (long long)(mask))
-#define __sanitizer_syscall_post___pselect50(res, nd, in, ou, ex, ts, mask)    \
-  __sanitizer_syscall_post_impl___pselect50(                                   \
-      res, (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \
-      (long long)(ts), (long long)(mask))
-#define __sanitizer_syscall_pre___pollts50(fds, nfds, ts, mask)                \
-  __sanitizer_syscall_pre_impl___pollts50((long long)(fds), (long long)(nfds), \
-                                          (long long)(ts), (long long)(mask))
-#define __sanitizer_syscall_post___pollts50(res, fds, nfds, ts, mask)          \
-  __sanitizer_syscall_post_impl___pollts50(res, (long long)(fds),              \
-                                           (long long)(nfds), (long long)(ts), \
-                                           (long long)(mask))
-#define __sanitizer_syscall_pre___aio_suspend50(list, nent, timeout)           \
-  __sanitizer_syscall_pre_impl___aio_suspend50(                                \
-      (long long)(list), (long long)(nent), (long long)(timeout))
-#define __sanitizer_syscall_post___aio_suspend50(res, list, nent, timeout)     \
-  __sanitizer_syscall_post_impl___aio_suspend50(                               \
-      res, (long long)(list), (long long)(nent), (long long)(timeout))
-#define __sanitizer_syscall_pre___stat50(path, ub)                             \
-  __sanitizer_syscall_pre_impl___stat50((long long)(path), (long long)(ub))
-#define __sanitizer_syscall_post___stat50(res, path, ub)                       \
-  __sanitizer_syscall_post_impl___stat50(res, (long long)(path),               \
-                                         (long long)(ub))
-#define __sanitizer_syscall_pre___fstat50(fd, sb)                              \
-  __sanitizer_syscall_pre_impl___fstat50((long long)(fd), (long long)(sb))
-#define __sanitizer_syscall_post___fstat50(res, fd, sb)                        \
-  __sanitizer_syscall_post_impl___fstat50(res, (long long)(fd), (long long)(sb))
-#define __sanitizer_syscall_pre___lstat50(path, ub)                            \
-  __sanitizer_syscall_pre_impl___lstat50((long long)(path), (long long)(ub))
-#define __sanitizer_syscall_post___lstat50(res, path, ub)                      \
-  __sanitizer_syscall_post_impl___lstat50(res, (long long)(path),              \
-                                          (long long)(ub))
-#define __sanitizer_syscall_pre_____semctl50(semid, semnum, cmd, arg)          \
-  __sanitizer_syscall_pre_impl_____semctl50(                                   \
-      (long long)(semid), (long long)(semnum), (long long)(cmd),               \
-      (long long)(arg))
-#define __sanitizer_syscall_post_____semctl50(res, semid, semnum, cmd, arg)    \
-  __sanitizer_syscall_post_impl_____semctl50(                                  \
-      res, (long long)(semid), (long long)(semnum), (long long)(cmd),          \
-      (long long)(arg))
-#define __sanitizer_syscall_pre___shmctl50(shmid, cmd, buf)                    \
-  __sanitizer_syscall_pre_impl___shmctl50((long long)(shmid),                  \
-                                          (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_post___shmctl50(res, shmid, cmd, buf)              \
-  __sanitizer_syscall_post_impl___shmctl50(res, (long long)(shmid),            \
-                                           (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_pre___msgctl50(msqid, cmd, buf)                    \
-  __sanitizer_syscall_pre_impl___msgctl50((long long)(msqid),                  \
-                                          (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_post___msgctl50(res, msqid, cmd, buf)              \
-  __sanitizer_syscall_post_impl___msgctl50(res, (long long)(msqid),            \
-                                           (long long)(cmd), (long long)(buf))
-#define __sanitizer_syscall_pre___getrusage50(who, rusage)                     \
-  __sanitizer_syscall_pre_impl___getrusage50((long long)(who),                 \
-                                             (long long)(rusage))
-#define __sanitizer_syscall_post___getrusage50(res, who, rusage)               \
-  __sanitizer_syscall_post_impl___getrusage50(res, (long long)(who),           \
-                                              (long long)(rusage))
-#define __sanitizer_syscall_pre___timer_settime50(timerid, flags, value,       \
-                                                  ovalue)                      \
-  __sanitizer_syscall_pre_impl___timer_settime50(                              \
-      (long long)(timerid), (long long)(flags), (long long)(value),            \
-      (long long)(ovalue))
-#define __sanitizer_syscall_post___timer_settime50(res, timerid, flags, value, \
-                                                   ovalue)                     \
-  __sanitizer_syscall_post_impl___timer_settime50(                             \
-      res, (long long)(timerid), (long long)(flags), (long long)(value),       \
-      (long long)(ovalue))
-#define __sanitizer_syscall_pre___timer_gettime50(timerid, value)              \
-  __sanitizer_syscall_pre_impl___timer_gettime50((long long)(timerid),         \
-                                                 (long long)(value))
-#define __sanitizer_syscall_post___timer_gettime50(res, timerid, value)        \
-  __sanitizer_syscall_post_impl___timer_gettime50(res, (long long)(timerid),   \
-                                                  (long long)(value))
-#if defined(NTP) || !defined(_KERNEL_OPT)
-#define __sanitizer_syscall_pre___ntp_gettime50(ntvp)                          \
-  __sanitizer_syscall_pre_impl___ntp_gettime50((long long)(ntvp))
-#define __sanitizer_syscall_post___ntp_gettime50(res, ntvp)                    \
-  __sanitizer_syscall_post_impl___ntp_gettime50(res, (long long)(ntvp))
-#else
-/* syscall 448 has been skipped */
-#endif
-#define __sanitizer_syscall_pre___wait450(pid, status, options, rusage)        \
-  __sanitizer_syscall_pre_impl___wait450(                                      \
-      (long long)(pid), (long long)(status), (long long)(options),             \
-      (long long)(rusage))
-#define __sanitizer_syscall_post___wait450(res, pid, status, options, rusage)  \
-  __sanitizer_syscall_post_impl___wait450(                                     \
-      res, (long long)(pid), (long long)(status), (long long)(options),        \
-      (long long)(rusage))
-#define __sanitizer_syscall_pre___mknod50(path, mode, dev)                     \
-  __sanitizer_syscall_pre_impl___mknod50((long long)(path), (long long)(mode), \
-                                         (long long)(dev))
-#define __sanitizer_syscall_post___mknod50(res, path, mode, dev)               \
-  __sanitizer_syscall_post_impl___mknod50(res, (long long)(path),              \
-                                          (long long)(mode), (long long)(dev))
-#define __sanitizer_syscall_pre___fhstat50(fhp, fh_size, sb)                   \
-  __sanitizer_syscall_pre_impl___fhstat50(                                     \
-      (long long)(fhp), (long long)(fh_size), (long long)(sb))
-#define __sanitizer_syscall_post___fhstat50(res, fhp, fh_size, sb)             \
-  __sanitizer_syscall_post_impl___fhstat50(                                    \
-      res, (long long)(fhp), (long long)(fh_size), (long long)(sb))
-/* syscall 452 has been skipped */
-#define __sanitizer_syscall_pre_pipe2(fildes, flags)                           \
-  __sanitizer_syscall_pre_impl_pipe2((long long)(fildes), (long long)(flags))
-#define __sanitizer_syscall_post_pipe2(res, fildes, flags)                     \
-  __sanitizer_syscall_post_impl_pipe2(res, (long long)(fildes),                \
-                                      (long long)(flags))
-#define __sanitizer_syscall_pre_dup3(from, to, flags)                          \
-  __sanitizer_syscall_pre_impl_dup3((long long)(from), (long long)(to),        \
-                                    (long long)(flags))
-#define __sanitizer_syscall_post_dup3(res, from, to, flags)                    \
-  __sanitizer_syscall_post_impl_dup3(res, (long long)(from), (long long)(to),  \
-                                     (long long)(flags))
-#define __sanitizer_syscall_pre_kqueue1(flags)                                 \
-  __sanitizer_syscall_pre_impl_kqueue1((long long)(flags))
-#define __sanitizer_syscall_post_kqueue1(res, flags)                           \
-  __sanitizer_syscall_post_impl_kqueue1(res, (long long)(flags))
-#define __sanitizer_syscall_pre_paccept(s, name, anamelen, mask, flags)        \
-  __sanitizer_syscall_pre_impl_paccept((long long)(s), (long long)(name),      \
-                                       (long long)(anamelen),                  \
-                                       (long long)(mask), (long long)(flags))
-#define __sanitizer_syscall_post_paccept(res, s, name, anamelen, mask, flags)  \
-  __sanitizer_syscall_post_impl_paccept(                                       \
-      res, (long long)(s), (long long)(name), (long long)(anamelen),           \
-      (long long)(mask), (long long)(flags))
-#define __sanitizer_syscall_pre_linkat(fd1, name1, fd2, name2, flags)          \
-  __sanitizer_syscall_pre_impl_linkat((long long)(fd1), (long long)(name1),    \
-                                      (long long)(fd2), (long long)(name2),    \
-                                      (long long)(flags))
-#define __sanitizer_syscall_post_linkat(res, fd1, name1, fd2, name2, flags)    \
-  __sanitizer_syscall_post_impl_linkat(res, (long long)(fd1),                  \
-                                       (long long)(name1), (long long)(fd2),   \
-                                       (long long)(name2), (long long)(flags))
-#define __sanitizer_syscall_pre_renameat(fromfd, from, tofd, to)               \
-  __sanitizer_syscall_pre_impl_renameat((long long)(fromfd),                   \
-                                        (long long)(from), (long long)(tofd),  \
-                                        (long long)(to))
-#define __sanitizer_syscall_post_renameat(res, fromfd, from, tofd, to)         \
-  __sanitizer_syscall_post_impl_renameat(res, (long long)(fromfd),             \
-                                         (long long)(from), (long long)(tofd), \
-                                         (long long)(to))
-#define __sanitizer_syscall_pre_mkfifoat(fd, path, mode)                       \
-  __sanitizer_syscall_pre_impl_mkfifoat((long long)(fd), (long long)(path),    \
-                                        (long long)(mode))
-#define __sanitizer_syscall_post_mkfifoat(res, fd, path, mode)                 \
-  __sanitizer_syscall_post_impl_mkfifoat(res, (long long)(fd),                 \
-                                         (long long)(path), (long long)(mode))
-#define __sanitizer_syscall_pre_mknodat(fd, path, mode, PAD, dev)              \
-  __sanitizer_syscall_pre_impl_mknodat((long long)(fd), (long long)(path),     \
-                                       (long long)(mode), (long long)(PAD),    \
-                                       (long long)(dev))
-#define __sanitizer_syscall_post_mknodat(res, fd, path, mode, PAD, dev)        \
-  __sanitizer_syscall_post_impl_mknodat(res, (long long)(fd),                  \
-                                        (long long)(path), (long long)(mode),  \
-                                        (long long)(PAD), (long long)(dev))
-#define __sanitizer_syscall_pre_mkdirat(fd, path, mode)                        \
-  __sanitizer_syscall_pre_impl_mkdirat((long long)(fd), (long long)(path),     \
-                                       (long long)(mode))
-#define __sanitizer_syscall_post_mkdirat(res, fd, path, mode)                  \
-  __sanitizer_syscall_post_impl_mkdirat(res, (long long)(fd),                  \
-                                        (long long)(path), (long long)(mode))
-#define __sanitizer_syscall_pre_faccessat(fd, path, amode, flag)               \
-  __sanitizer_syscall_pre_impl_faccessat((long long)(fd), (long long)(path),   \
-                                         (long long)(amode),                   \
-                                         (long long)(flag))
-#define __sanitizer_syscall_post_faccessat(res, fd, path, amode, flag)         \
-  __sanitizer_syscall_post_impl_faccessat(                                     \
-      res, (long long)(fd), (long long)(path), (long long)(amode),             \
-      (long long)(flag))
-#define __sanitizer_syscall_pre_fchmodat(fd, path, mode, flag)                 \
-  __sanitizer_syscall_pre_impl_fchmodat((long long)(fd), (long long)(path),    \
-                                        (long long)(mode), (long long)(flag))
-#define __sanitizer_syscall_post_fchmodat(res, fd, path, mode, flag)           \
-  __sanitizer_syscall_post_impl_fchmodat(res, (long long)(fd),                 \
-                                         (long long)(path), (long long)(mode), \
-                                         (long long)(flag))
-#define __sanitizer_syscall_pre_fchownat(fd, path, owner, group, flag)         \
-  __sanitizer_syscall_pre_impl_fchownat((long long)(fd), (long long)(path),    \
-                                        (long long)(owner),                    \
-                                        (long long)(group), (long long)(flag))
-#define __sanitizer_syscall_post_fchownat(res, fd, path, owner, group, flag)   \
-  __sanitizer_syscall_post_impl_fchownat(                                      \
-      res, (long long)(fd), (long long)(path), (long long)(owner),             \
-      (long long)(group), (long long)(flag))
-#define __sanitizer_syscall_pre_fexecve(fd, argp, envp)                        \
-  __sanitizer_syscall_pre_impl_fexecve((long long)(fd), (long long)(argp),     \
-                                       (long long)(envp))
-#define __sanitizer_syscall_post_fexecve(res, fd, argp, envp)                  \
-  __sanitizer_syscall_post_impl_fexecve(res, (long long)(fd),                  \
-                                        (long long)(argp), (long long)(envp))
-#define __sanitizer_syscall_pre_fstatat(fd, path, buf, flag)                   \
-  __sanitizer_syscall_pre_impl_fstatat((long long)(fd), (long long)(path),     \
-                                       (long long)(buf), (long long)(flag))
-#define __sanitizer_syscall_post_fstatat(res, fd, path, buf, flag)             \
-  __sanitizer_syscall_post_impl_fstatat(res, (long long)(fd),                  \
-                                        (long long)(path), (long long)(buf),   \
-                                        (long long)(flag))
-#define __sanitizer_syscall_pre_utimensat(fd, path, tptr, flag)                \
-  __sanitizer_syscall_pre_impl_utimensat((long long)(fd), (long long)(path),   \
-                                         (long long)(tptr), (long long)(flag))
-#define __sanitizer_syscall_post_utimensat(res, fd, path, tptr, flag)          \
-  __sanitizer_syscall_post_impl_utimensat(                                     \
-      res, (long long)(fd), (long long)(path), (long long)(tptr),              \
-      (long long)(flag))
-#define __sanitizer_syscall_pre_openat(fd, path, oflags, mode)                 \
-  __sanitizer_syscall_pre_impl_openat((long long)(fd), (long long)(path),      \
-                                      (long long)(oflags), (long long)(mode))
-#define __sanitizer_syscall_post_openat(res, fd, path, oflags, mode)           \
-  __sanitizer_syscall_post_impl_openat(res, (long long)(fd),                   \
-                                       (long long)(path), (long long)(oflags), \
-                                       (long long)(mode))
-#define __sanitizer_syscall_pre_readlinkat(fd, path, buf, bufsize)             \
-  __sanitizer_syscall_pre_impl_readlinkat((long long)(fd), (long long)(path),  \
-                                          (long long)(buf),                    \
-                                          (long long)(bufsize))
-#define __sanitizer_syscall_post_readlinkat(res, fd, path, buf, bufsize)       \
-  __sanitizer_syscall_post_impl_readlinkat(                                    \
-      res, (long long)(fd), (long long)(path), (long long)(buf),               \
-      (long long)(bufsize))
-#define __sanitizer_syscall_pre_symlinkat(path1, fd, path2)                    \
-  __sanitizer_syscall_pre_impl_symlinkat((long long)(path1), (long long)(fd),  \
-                                         (long long)(path2))
-#define __sanitizer_syscall_post_symlinkat(res, path1, fd, path2)              \
-  __sanitizer_syscall_post_impl_symlinkat(res, (long long)(path1),             \
-                                          (long long)(fd), (long long)(path2))
-#define __sanitizer_syscall_pre_unlinkat(fd, path, flag)                       \
-  __sanitizer_syscall_pre_impl_unlinkat((long long)(fd), (long long)(path),    \
-                                        (long long)(flag))
-#define __sanitizer_syscall_post_unlinkat(res, fd, path, flag)                 \
-  __sanitizer_syscall_post_impl_unlinkat(res, (long long)(fd),                 \
-                                         (long long)(path), (long long)(flag))
-#define __sanitizer_syscall_pre_futimens(fd, tptr)                             \
-  __sanitizer_syscall_pre_impl_futimens((long long)(fd), (long long)(tptr))
-#define __sanitizer_syscall_post_futimens(res, fd, tptr)                       \
-  __sanitizer_syscall_post_impl_futimens(res, (long long)(fd),                 \
-                                         (long long)(tptr))
-#define __sanitizer_syscall_pre___quotactl(path, args)                         \
-  __sanitizer_syscall_pre_impl___quotactl((long long)(path), (long long)(args))
-#define __sanitizer_syscall_post___quotactl(res, path, args)                   \
-  __sanitizer_syscall_post_impl___quotactl(res, (long long)(path),             \
-                                           (long long)(args))
-#define __sanitizer_syscall_pre_posix_spawn(pid, path, file_actions, attrp,    \
-                                            argv, envp)                        \
-  __sanitizer_syscall_pre_impl_posix_spawn(                                    \
-      (long long)(pid), (long long)(path), (long long)(file_actions),          \
-      (long long)(attrp), (long long)(argv), (long long)(envp))
-#define __sanitizer_syscall_post_posix_spawn(res, pid, path, file_actions,     \
-                                             attrp, argv, envp)                \
-  __sanitizer_syscall_post_impl_posix_spawn(                                   \
-      res, (long long)(pid), (long long)(path), (long long)(file_actions),     \
-      (long long)(attrp), (long long)(argv), (long long)(envp))
-#define __sanitizer_syscall_pre_recvmmsg(s, mmsg, vlen, flags, timeout)        \
-  __sanitizer_syscall_pre_impl_recvmmsg((long long)(s), (long long)(mmsg),     \
-                                        (long long)(vlen), (long long)(flags), \
-                                        (long long)(timeout))
-#define __sanitizer_syscall_post_recvmmsg(res, s, mmsg, vlen, flags, timeout)  \
-  __sanitizer_syscall_post_impl_recvmmsg(                                      \
-      res, (long long)(s), (long long)(mmsg), (long long)(vlen),               \
-      (long long)(flags), (long long)(timeout))
-#define __sanitizer_syscall_pre_sendmmsg(s, mmsg, vlen, flags)                 \
-  __sanitizer_syscall_pre_impl_sendmmsg((long long)(s), (long long)(mmsg),     \
-                                        (long long)(vlen), (long long)(flags))
-#define __sanitizer_syscall_post_sendmmsg(res, s, mmsg, vlen, flags)           \
-  __sanitizer_syscall_post_impl_sendmmsg(res, (long long)(s),                  \
-                                         (long long)(mmsg), (long long)(vlen), \
-                                         (long long)(flags))
-#define __sanitizer_syscall_pre_clock_nanosleep(clock_id, flags, rqtp, rmtp)   \
-  __sanitizer_syscall_pre_impl_clock_nanosleep(                                \
-      (long long)(clock_id), (long long)(flags), (long long)(rqtp),            \
-      (long long)(rmtp))
-#define __sanitizer_syscall_post_clock_nanosleep(res, clock_id, flags, rqtp,   \
-                                                 rmtp)                         \
-  __sanitizer_syscall_post_impl_clock_nanosleep(                               \
-      res, (long long)(clock_id), (long long)(flags), (long long)(rqtp),       \
-      (long long)(rmtp))
-#define __sanitizer_syscall_pre____lwp_park60(clock_id, flags, ts, unpark,     \
-                                              hint, unparkhint)                \
-  __sanitizer_syscall_pre_impl____lwp_park60(                                  \
-      (long long)(clock_id), (long long)(flags), (long long)(ts),              \
-      (long long)(unpark), (long long)(hint), (long long)(unparkhint))
-#define __sanitizer_syscall_post____lwp_park60(res, clock_id, flags, ts,       \
-                                               unpark, hint, unparkhint)       \
-  __sanitizer_syscall_post_impl____lwp_park60(                                 \
-      res, (long long)(clock_id), (long long)(flags), (long long)(ts),         \
-      (long long)(unpark), (long long)(hint), (long long)(unparkhint))
-#define __sanitizer_syscall_pre_posix_fallocate(fd, PAD, pos, len)             \
-  __sanitizer_syscall_pre_impl_posix_fallocate(                                \
-      (long long)(fd), (long long)(PAD), (long long)(pos), (long long)(len))
-#define __sanitizer_syscall_post_posix_fallocate(res, fd, PAD, pos, len)       \
-  __sanitizer_syscall_post_impl_posix_fallocate(                               \
-      res, (long long)(fd), (long long)(PAD), (long long)(pos),                \
-      (long long)(len))
-#define __sanitizer_syscall_pre_fdiscard(fd, PAD, pos, len)                    \
-  __sanitizer_syscall_pre_impl_fdiscard((long long)(fd), (long long)(PAD),     \
-                                        (long long)(pos), (long long)(len))
-#define __sanitizer_syscall_post_fdiscard(res, fd, PAD, pos, len)              \
-  __sanitizer_syscall_post_impl_fdiscard(res, (long long)(fd),                 \
-                                         (long long)(PAD), (long long)(pos),   \
-                                         (long long)(len))
-#define __sanitizer_syscall_pre_wait6(idtype, id, status, options, wru, info)  \
-  __sanitizer_syscall_pre_impl_wait6(                                          \
-      (long long)(idtype), (long long)(id), (long long)(status),               \
-      (long long)(options), (long long)(wru), (long long)(info))
-#define __sanitizer_syscall_post_wait6(res, idtype, id, status, options, wru,  \
-                                       info)                                   \
-  __sanitizer_syscall_post_impl_wait6(                                         \
-      res, (long long)(idtype), (long long)(id), (long long)(status),          \
-      (long long)(options), (long long)(wru), (long long)(info))
-#define __sanitizer_syscall_pre_clock_getcpuclockid2(idtype, id, clock_id)     \
-  __sanitizer_syscall_pre_impl_clock_getcpuclockid2(                           \
-      (long long)(idtype), (long long)(id), (long long)(clock_id))
-#define __sanitizer_syscall_post_clock_getcpuclockid2(res, idtype, id,         \
-                                                      clock_id)                \
-  __sanitizer_syscall_post_impl_clock_getcpuclockid2(                          \
-      res, (long long)(idtype), (long long)(id), (long long)(clock_id))
-#define __sanitizer_syscall_pre___getvfsstat90(buf, bufsize, flags)            \
-  __sanitizer_syscall_pre_impl___getvfsstat90(                                 \
-      (long long)(buf), (long long)(bufsize), (long long)(flags))
-#define __sanitizer_syscall_post___getvfsstat90(res, buf, bufsize, flags)      \
-  __sanitizer_syscall_post_impl___getvfsstat90(                                \
-      res, (long long)(buf), (long long)(bufsize), (long long)(flags))
-#define __sanitizer_syscall_pre___statvfs190(path, buf, flags)                 \
-  __sanitizer_syscall_pre_impl___statvfs190(                                   \
-      (long long)(path), (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_post___statvfs190(res, path, buf, flags)           \
-  __sanitizer_syscall_post_impl___statvfs190(                                  \
-      res, (long long)(path), (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_pre___fstatvfs190(fd, buf, flags)                  \
-  __sanitizer_syscall_pre_impl___fstatvfs190(                                  \
-      (long long)(fd), (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_post___fstatvfs190(res, fd, buf, flags)            \
-  __sanitizer_syscall_post_impl___fstatvfs190(                                 \
-      res, (long long)(fd), (long long)(buf), (long long)(flags))
-#define __sanitizer_syscall_pre___fhstatvfs190(fhp, fh_size, buf, flags)       \
-  __sanitizer_syscall_pre_impl___fhstatvfs190(                                 \
-      (long long)(fhp), (long long)(fh_size), (long long)(buf),                \
-      (long long)(flags))
-#define __sanitizer_syscall_post___fhstatvfs190(res, fhp, fh_size, buf, flags) \
-  __sanitizer_syscall_post_impl___fhstatvfs190(                                \
-      res, (long long)(fhp), (long long)(fh_size), (long long)(buf),           \
-      (long long)(flags))
-
-/* Compat with older releases */
-#define __sanitizer_syscall_pre_getvfsstat                                     \
-  __sanitizer_syscall_pre_compat_90_getvfsstat
-#define __sanitizer_syscall_post_getvfsstat                                    \
-  __sanitizer_syscall_post_compat_90_getvfsstat
-
-#define __sanitizer_syscall_pre_statvfs1                                       \
-  __sanitizer_syscall_pre_compat_90_statvfs1
-#define __sanitizer_syscall_post_statvfs1                                      \
-  __sanitizer_syscall_post_compat_90_statvfs1
-
-#define __sanitizer_syscall_pre_fstatvfs1                                      \
-  __sanitizer_syscall_pre_compat_90_fstatvfs1
-#define __sanitizer_syscall_post_fstatvfs1                                     \
-  __sanitizer_syscall_post_compat_90_fstatvfs1
-
-#define __sanitizer_syscall_pre___fhstatvfs140                                 \
-  __sanitizer_syscall_pre_compat_90_fhstatvfs1
-#define __sanitizer_syscall_post___fhstatvfs140                                \
-  __sanitizer_syscall_post_compat_90_fhstatvfs1
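-/* Illustrative expansion (not generated output): code written against the
-   pre-NetBSD-9.0 names keeps compiling, e.g. a call such as
-     __sanitizer_syscall_pre_statvfs1(path, buf, flags)
-   now forwards to the compat_90 hook aliased above; the (path, buf, flags)
-   argument list is an assumption based on the neighboring compat_90 and
-   __statvfs190 definitions. */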
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Private declarations. Do not call directly from user code. Use macros above.
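-// Usage sketch (illustrative, not part of the generated file): a program
-// issuing a raw syscall is expected to bracket it with the matching pre/post
-// macros so the sanitizer runtime can model the call's memory effects. This
-// assumes the standard NetBSD syscall(2) interface; read() is used only as
-// an example.
-//
-//   #include <sys/syscall.h>
-//   #include <unistd.h>
-//   #include <sanitizer/netbsd_syscall_hooks.h>
-//
-//   long long sanitized_read(int fd, void *buf, size_t nbyte) {
-//     __sanitizer_syscall_pre_read(fd, buf, nbyte);       // before the call
-//     long long res = syscall(SYS_read, fd, buf, nbyte);  // raw syscall
-//     __sanitizer_syscall_post_read(res, fd, buf, nbyte); // after the call
-//     return res;
-//   }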
-
-// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
-
-void __sanitizer_syscall_pre_impl_syscall(long long code, long long arg0,
-                                          long long arg1, long long arg2,
-                                          long long arg3, long long arg4,
-                                          long long arg5, long long arg6,
-                                          long long arg7);
-void __sanitizer_syscall_post_impl_syscall(long long res, long long code,
-                                           long long arg0, long long arg1,
-                                           long long arg2, long long arg3,
-                                           long long arg4, long long arg5,
-                                           long long arg6, long long arg7);
-void __sanitizer_syscall_pre_impl_exit(long long rval);
-void __sanitizer_syscall_post_impl_exit(long long res, long long rval);
-void __sanitizer_syscall_pre_impl_fork(void);
-void __sanitizer_syscall_post_impl_fork(long long res);
-void __sanitizer_syscall_pre_impl_read(long long fd, long long buf,
-                                       long long nbyte);
-void __sanitizer_syscall_post_impl_read(long long res, long long fd,
-                                        long long buf, long long nbyte);
-void __sanitizer_syscall_pre_impl_write(long long fd, long long buf,
-                                        long long nbyte);
-void __sanitizer_syscall_post_impl_write(long long res, long long fd,
-                                         long long buf, long long nbyte);
-void __sanitizer_syscall_pre_impl_open(long long path, long long flags,
-                                       long long mode);
-void __sanitizer_syscall_post_impl_open(long long res, long long path,
-                                        long long flags, long long mode);
-void __sanitizer_syscall_pre_impl_close(long long fd);
-void __sanitizer_syscall_post_impl_close(long long res, long long fd);
-void __sanitizer_syscall_pre_impl_compat_50_wait4(long long pid,
-                                                  long long status,
-                                                  long long options,
-                                                  long long rusage);
-void __sanitizer_syscall_post_impl_compat_50_wait4(long long res, long long pid,
-                                                   long long status,
-                                                   long long options,
-                                                   long long rusage);
-void __sanitizer_syscall_pre_impl_compat_43_ocreat(long long path,
-                                                   long long mode);
-void __sanitizer_syscall_post_impl_compat_43_ocreat(long long res,
-                                                    long long path,
-                                                    long long mode);
-void __sanitizer_syscall_pre_impl_link(long long path, long long link);
-void __sanitizer_syscall_post_impl_link(long long res, long long path,
-                                        long long link);
-void __sanitizer_syscall_pre_impl_unlink(long long path);
-void __sanitizer_syscall_post_impl_unlink(long long res, long long path);
-/* syscall 11 has been skipped */
-void __sanitizer_syscall_pre_impl_chdir(long long path);
-void __sanitizer_syscall_post_impl_chdir(long long res, long long path);
-void __sanitizer_syscall_pre_impl_fchdir(long long fd);
-void __sanitizer_syscall_post_impl_fchdir(long long res, long long fd);
-void __sanitizer_syscall_pre_impl_compat_50_mknod(long long path,
-                                                  long long mode,
-                                                  long long dev);
-void __sanitizer_syscall_post_impl_compat_50_mknod(long long res,
-                                                   long long path,
-                                                   long long mode,
-                                                   long long dev);
-void __sanitizer_syscall_pre_impl_chmod(long long path, long long mode);
-void __sanitizer_syscall_post_impl_chmod(long long res, long long path,
-                                         long long mode);
-void __sanitizer_syscall_pre_impl_chown(long long path, long long uid,
-                                        long long gid);
-void __sanitizer_syscall_post_impl_chown(long long res, long long path,
-                                         long long uid, long long gid);
-void __sanitizer_syscall_pre_impl_break(long long nsize);
-void __sanitizer_syscall_post_impl_break(long long res, long long nsize);
-void __sanitizer_syscall_pre_impl_compat_20_getfsstat(long long buf,
-                                                      long long bufsize,
-                                                      long long flags);
-void __sanitizer_syscall_post_impl_compat_20_getfsstat(long long res,
-                                                       long long buf,
-                                                       long long bufsize,
-                                                       long long flags);
-void __sanitizer_syscall_pre_impl_compat_43_olseek(long long fd,
-                                                   long long offset,
-                                                   long long whence);
-void __sanitizer_syscall_post_impl_compat_43_olseek(long long res, long long fd,
-                                                    long long offset,
-                                                    long long whence);
-void __sanitizer_syscall_pre_impl_getpid(void);
-void __sanitizer_syscall_post_impl_getpid(long long res);
-void __sanitizer_syscall_pre_impl_compat_40_mount(long long type,
-                                                  long long path,
-                                                  long long flags,
-                                                  long long data);
-void __sanitizer_syscall_post_impl_compat_40_mount(long long res,
-                                                   long long type,
-                                                   long long path,
-                                                   long long flags,
-                                                   long long data);
-void __sanitizer_syscall_pre_impl_unmount(long long path, long long flags);
-void __sanitizer_syscall_post_impl_unmount(long long res, long long path,
-                                           long long flags);
-void __sanitizer_syscall_pre_impl_setuid(long long uid);
-void __sanitizer_syscall_post_impl_setuid(long long res, long long uid);
-void __sanitizer_syscall_pre_impl_getuid(void);
-void __sanitizer_syscall_post_impl_getuid(long long res);
-void __sanitizer_syscall_pre_impl_geteuid(void);
-void __sanitizer_syscall_post_impl_geteuid(long long res);
-void __sanitizer_syscall_pre_impl_ptrace(long long req, long long pid,
-                                         long long addr, long long data);
-void __sanitizer_syscall_post_impl_ptrace(long long res, long long req,
-                                          long long pid, long long addr,
-                                          long long data);
-void __sanitizer_syscall_pre_impl_recvmsg(long long s, long long msg,
-                                          long long flags);
-void __sanitizer_syscall_post_impl_recvmsg(long long res, long long s,
-                                           long long msg, long long flags);
-void __sanitizer_syscall_pre_impl_sendmsg(long long s, long long msg,
-                                          long long flags);
-void __sanitizer_syscall_post_impl_sendmsg(long long res, long long s,
-                                           long long msg, long long flags);
-void __sanitizer_syscall_pre_impl_recvfrom(long long s, long long buf,
-                                           long long len, long long flags,
-                                           long long from,
-                                           long long fromlenaddr);
-void __sanitizer_syscall_post_impl_recvfrom(long long res, long long s,
-                                            long long buf, long long len,
-                                            long long flags, long long from,
-                                            long long fromlenaddr);
-void __sanitizer_syscall_pre_impl_accept(long long s, long long name,
-                                         long long anamelen);
-void __sanitizer_syscall_post_impl_accept(long long res, long long s,
-                                          long long name, long long anamelen);
-void __sanitizer_syscall_pre_impl_getpeername(long long fdes, long long asa,
-                                              long long alen);
-void __sanitizer_syscall_post_impl_getpeername(long long res, long long fdes,
-                                               long long asa, long long alen);
-void __sanitizer_syscall_pre_impl_getsockname(long long fdes, long long asa,
-                                              long long alen);
-void __sanitizer_syscall_post_impl_getsockname(long long res, long long fdes,
-                                               long long asa, long long alen);
-void __sanitizer_syscall_pre_impl_access(long long path, long long flags);
-void __sanitizer_syscall_post_impl_access(long long res, long long path,
-                                          long long flags);
-void __sanitizer_syscall_pre_impl_chflags(long long path, long long flags);
-void __sanitizer_syscall_post_impl_chflags(long long res, long long path,
-                                           long long flags);
-void __sanitizer_syscall_pre_impl_fchflags(long long fd, long long flags);
-void __sanitizer_syscall_post_impl_fchflags(long long res, long long fd,
-                                            long long flags);
-void __sanitizer_syscall_pre_impl_sync(void);
-void __sanitizer_syscall_post_impl_sync(long long res);
-void __sanitizer_syscall_pre_impl_kill(long long pid, long long signum);
-void __sanitizer_syscall_post_impl_kill(long long res, long long pid,
-                                        long long signum);
-void __sanitizer_syscall_pre_impl_compat_43_stat43(long long path,
-                                                   long long ub);
-void __sanitizer_syscall_post_impl_compat_43_stat43(long long res,
-                                                    long long path,
-                                                    long long ub);
-void __sanitizer_syscall_pre_impl_getppid(void);
-void __sanitizer_syscall_post_impl_getppid(long long res);
-void __sanitizer_syscall_pre_impl_compat_43_lstat43(long long path,
-                                                    long long ub);
-void __sanitizer_syscall_post_impl_compat_43_lstat43(long long res,
-                                                     long long path,
-                                                     long long ub);
-void __sanitizer_syscall_pre_impl_dup(long long fd);
-void __sanitizer_syscall_post_impl_dup(long long res, long long fd);
-void __sanitizer_syscall_pre_impl_pipe(void);
-void __sanitizer_syscall_post_impl_pipe(long long res);
-void __sanitizer_syscall_pre_impl_getegid(void);
-void __sanitizer_syscall_post_impl_getegid(long long res);
-void __sanitizer_syscall_pre_impl_profil(long long samples, long long size,
-                                         long long offset, long long scale);
-void __sanitizer_syscall_post_impl_profil(long long res, long long samples,
-                                          long long size, long long offset,
-                                          long long scale);
-void __sanitizer_syscall_pre_impl_ktrace(long long fname, long long ops,
-                                         long long facs, long long pid);
-void __sanitizer_syscall_post_impl_ktrace(long long res, long long fname,
-                                          long long ops, long long facs,
-                                          long long pid);
-void __sanitizer_syscall_pre_impl_compat_13_sigaction13(long long signum,
-                                                        long long nsa,
-                                                        long long osa);
-void __sanitizer_syscall_post_impl_compat_13_sigaction13(long long res,
-                                                         long long signum,
-                                                         long long nsa,
-                                                         long long osa);
-void __sanitizer_syscall_pre_impl_getgid(void);
-void __sanitizer_syscall_post_impl_getgid(long long res);
-void __sanitizer_syscall_pre_impl_compat_13_sigprocmask13(long long how,
-                                                          long long mask);
-void __sanitizer_syscall_post_impl_compat_13_sigprocmask13(long long res,
-                                                           long long how,
-                                                           long long mask);
-void __sanitizer_syscall_pre_impl___getlogin(long long namebuf,
-                                             long long namelen);
-void __sanitizer_syscall_post_impl___getlogin(long long res, long long namebuf,
-                                              long long namelen);
-void __sanitizer_syscall_pre_impl___setlogin(long long namebuf);
-void __sanitizer_syscall_post_impl___setlogin(long long res, long long namebuf);
-void __sanitizer_syscall_pre_impl_acct(long long path);
-void __sanitizer_syscall_post_impl_acct(long long res, long long path);
-void __sanitizer_syscall_pre_impl_compat_13_sigpending13(void);
-void __sanitizer_syscall_post_impl_compat_13_sigpending13(long long res);
-void __sanitizer_syscall_pre_impl_compat_13_sigaltstack13(long long nss,
-                                                          long long oss);
-void __sanitizer_syscall_post_impl_compat_13_sigaltstack13(long long res,
-                                                           long long nss,
-                                                           long long oss);
-void __sanitizer_syscall_pre_impl_ioctl(long long fd, long long com,
-                                        long long data);
-void __sanitizer_syscall_post_impl_ioctl(long long res, long long fd,
-                                         long long com, long long data);
-void __sanitizer_syscall_pre_impl_compat_12_oreboot(long long opt);
-void __sanitizer_syscall_post_impl_compat_12_oreboot(long long res,
-                                                     long long opt);
-void __sanitizer_syscall_pre_impl_revoke(long long path);
-void __sanitizer_syscall_post_impl_revoke(long long res, long long path);
-void __sanitizer_syscall_pre_impl_symlink(long long path, long long link);
-void __sanitizer_syscall_post_impl_symlink(long long res, long long path,
-                                           long long link);
-void __sanitizer_syscall_pre_impl_readlink(long long path, long long buf,
-                                           long long count);
-void __sanitizer_syscall_post_impl_readlink(long long res, long long path,
-                                            long long buf, long long count);
-void __sanitizer_syscall_pre_impl_execve(long long path, long long argp,
-                                         long long envp);
-void __sanitizer_syscall_post_impl_execve(long long res, long long path,
-                                          long long argp, long long envp);
-void __sanitizer_syscall_pre_impl_umask(long long newmask);
-void __sanitizer_syscall_post_impl_umask(long long res, long long newmask);
-void __sanitizer_syscall_pre_impl_chroot(long long path);
-void __sanitizer_syscall_post_impl_chroot(long long res, long long path);
-void __sanitizer_syscall_pre_impl_compat_43_fstat43(long long fd, long long sb);
-void __sanitizer_syscall_post_impl_compat_43_fstat43(long long res,
-                                                     long long fd,
-                                                     long long sb);
-void __sanitizer_syscall_pre_impl_compat_43_ogetkerninfo(long long op,
-                                                         long long where,
-                                                         long long size,
-                                                         long long arg);
-void __sanitizer_syscall_post_impl_compat_43_ogetkerninfo(long long res,
-                                                          long long op,
-                                                          long long where,
-                                                          long long size,
-                                                          long long arg);
-void __sanitizer_syscall_pre_impl_compat_43_ogetpagesize(void);
-void __sanitizer_syscall_post_impl_compat_43_ogetpagesize(long long res);
-void __sanitizer_syscall_pre_impl_compat_12_msync(long long addr,
-                                                  long long len);
-void __sanitizer_syscall_post_impl_compat_12_msync(long long res,
-                                                   long long addr,
-                                                   long long len);
-void __sanitizer_syscall_pre_impl_vfork(void);
-void __sanitizer_syscall_post_impl_vfork(long long res);
-/* syscall 67 has been skipped */
-/* syscall 68 has been skipped */
-/* syscall 69 has been skipped */
-/* syscall 70 has been skipped */
-void __sanitizer_syscall_pre_impl_compat_43_ommap(long long addr, long long len,
-                                                  long long prot,
-                                                  long long flags, long long fd,
-                                                  long long pos);
-void __sanitizer_syscall_post_impl_compat_43_ommap(
-    long long res, long long addr, long long len, long long prot,
-    long long flags, long long fd, long long pos);
-void __sanitizer_syscall_pre_impl_vadvise(long long anom);
-void __sanitizer_syscall_post_impl_vadvise(long long res, long long anom);
-void __sanitizer_syscall_pre_impl_munmap(long long addr, long long len);
-void __sanitizer_syscall_post_impl_munmap(long long res, long long addr,
-                                          long long len);
-void __sanitizer_syscall_pre_impl_mprotect(long long addr, long long len,
-                                           long long prot);
-void __sanitizer_syscall_post_impl_mprotect(long long res, long long addr,
-                                            long long len, long long prot);
-void __sanitizer_syscall_pre_impl_madvise(long long addr, long long len,
-                                          long long behav);
-void __sanitizer_syscall_post_impl_madvise(long long res, long long addr,
-                                           long long len, long long behav);
-/* syscall 76 has been skipped */
-/* syscall 77 has been skipped */
-void __sanitizer_syscall_pre_impl_mincore(long long addr, long long len,
-                                          long long vec);
-void __sanitizer_syscall_post_impl_mincore(long long res, long long addr,
-                                           long long len, long long vec);
-void __sanitizer_syscall_pre_impl_getgroups(long long gidsetsize,
-                                            long long gidset);
-void __sanitizer_syscall_post_impl_getgroups(long long res,
-                                             long long gidsetsize,
-                                             long long gidset);
-void __sanitizer_syscall_pre_impl_setgroups(long long gidsetsize,
-                                            long long gidset);
-void __sanitizer_syscall_post_impl_setgroups(long long res,
-                                             long long gidsetsize,
-                                             long long gidset);
-void __sanitizer_syscall_pre_impl_getpgrp(void);
-void __sanitizer_syscall_post_impl_getpgrp(long long res);
-void __sanitizer_syscall_pre_impl_setpgid(long long pid, long long pgid);
-void __sanitizer_syscall_post_impl_setpgid(long long res, long long pid,
-                                           long long pgid);
-void __sanitizer_syscall_pre_impl_compat_50_setitimer(long long which,
-                                                      long long itv,
-                                                      long long oitv);
-void __sanitizer_syscall_post_impl_compat_50_setitimer(long long res,
-                                                       long long which,
-                                                       long long itv,
-                                                       long long oitv);
-void __sanitizer_syscall_pre_impl_compat_43_owait(void);
-void __sanitizer_syscall_post_impl_compat_43_owait(long long res);
-void __sanitizer_syscall_pre_impl_compat_12_oswapon(long long name);
-void __sanitizer_syscall_post_impl_compat_12_oswapon(long long res,
-                                                     long long name);
-void __sanitizer_syscall_pre_impl_compat_50_getitimer(long long which,
-                                                      long long itv);
-void __sanitizer_syscall_post_impl_compat_50_getitimer(long long res,
-                                                       long long which,
-                                                       long long itv);
-void __sanitizer_syscall_pre_impl_compat_43_ogethostname(long long hostname,
-                                                         long long len);
-void __sanitizer_syscall_post_impl_compat_43_ogethostname(long long res,
-                                                          long long hostname,
-                                                          long long len);
-void __sanitizer_syscall_pre_impl_compat_43_osethostname(long long hostname,
-                                                         long long len);
-void __sanitizer_syscall_post_impl_compat_43_osethostname(long long res,
-                                                          long long hostname,
-                                                          long long len);
-void __sanitizer_syscall_pre_impl_compat_43_ogetdtablesize(void);
-void __sanitizer_syscall_post_impl_compat_43_ogetdtablesize(long long res);
-void __sanitizer_syscall_pre_impl_dup2(long long from, long long to);
-void __sanitizer_syscall_post_impl_dup2(long long res, long long from,
-                                        long long to);
-/* syscall 91 has been skipped */
-void __sanitizer_syscall_pre_impl_fcntl(long long fd, long long cmd,
-                                        long long arg);
-void __sanitizer_syscall_post_impl_fcntl(long long res, long long fd,
-                                         long long cmd, long long arg);
-void __sanitizer_syscall_pre_impl_compat_50_select(long long nd, long long in,
-                                                   long long ou, long long ex,
-                                                   long long tv);
-void __sanitizer_syscall_post_impl_compat_50_select(long long res, long long nd,
-                                                    long long in, long long ou,
-                                                    long long ex, long long tv);
-/* syscall 94 has been skipped */
-void __sanitizer_syscall_pre_impl_fsync(long long fd);
-void __sanitizer_syscall_post_impl_fsync(long long res, long long fd);
-void __sanitizer_syscall_pre_impl_setpriority(long long which, long long who,
-                                              long long prio);
-void __sanitizer_syscall_post_impl_setpriority(long long res, long long which,
-                                               long long who, long long prio);
-void __sanitizer_syscall_pre_impl_compat_30_socket(long long domain,
-                                                   long long type,
-                                                   long long protocol);
-void __sanitizer_syscall_post_impl_compat_30_socket(long long res,
-                                                    long long domain,
-                                                    long long type,
-                                                    long long protocol);
-void __sanitizer_syscall_pre_impl_connect(long long s, long long name,
-                                          long long namelen);
-void __sanitizer_syscall_post_impl_connect(long long res, long long s,
-                                           long long name, long long namelen);
-void __sanitizer_syscall_pre_impl_compat_43_oaccept(long long s, long long name,
-                                                    long long anamelen);
-void __sanitizer_syscall_post_impl_compat_43_oaccept(long long res, long long s,
-                                                     long long name,
-                                                     long long anamelen);
-void __sanitizer_syscall_pre_impl_getpriority(long long which, long long who);
-void __sanitizer_syscall_post_impl_getpriority(long long res, long long which,
-                                               long long who);
-void __sanitizer_syscall_pre_impl_compat_43_osend(long long s, long long buf,
-                                                  long long len,
-                                                  long long flags);
-void __sanitizer_syscall_post_impl_compat_43_osend(long long res, long long s,
-                                                   long long buf, long long len,
-                                                   long long flags);
-void __sanitizer_syscall_pre_impl_compat_43_orecv(long long s, long long buf,
-                                                  long long len,
-                                                  long long flags);
-void __sanitizer_syscall_post_impl_compat_43_orecv(long long res, long long s,
-                                                   long long buf, long long len,
-                                                   long long flags);
-void __sanitizer_syscall_pre_impl_compat_13_sigreturn13(long long sigcntxp);
-void __sanitizer_syscall_post_impl_compat_13_sigreturn13(long long res,
-                                                         long long sigcntxp);
-void __sanitizer_syscall_pre_impl_bind(long long s, long long name,
-                                       long long namelen);
-void __sanitizer_syscall_post_impl_bind(long long res, long long s,
-                                        long long name, long long namelen);
-void __sanitizer_syscall_pre_impl_setsockopt(long long s, long long level,
-                                             long long name, long long val,
-                                             long long valsize);
-void __sanitizer_syscall_post_impl_setsockopt(long long res, long long s,
-                                              long long level, long long name,
-                                              long long val, long long valsize);
-void __sanitizer_syscall_pre_impl_listen(long long s, long long backlog);
-void __sanitizer_syscall_post_impl_listen(long long res, long long s,
-                                          long long backlog);
-/* syscall 107 has been skipped */
-void __sanitizer_syscall_pre_impl_compat_43_osigvec(long long signum,
-                                                    long long nsv,
-                                                    long long osv);
-void __sanitizer_syscall_post_impl_compat_43_osigvec(long long res,
-                                                     long long signum,
-                                                     long long nsv,
-                                                     long long osv);
-void __sanitizer_syscall_pre_impl_compat_43_osigblock(long long mask);
-void __sanitizer_syscall_post_impl_compat_43_osigblock(long long res,
-                                                       long long mask);
-void __sanitizer_syscall_pre_impl_compat_43_osigsetmask(long long mask);
-void __sanitizer_syscall_post_impl_compat_43_osigsetmask(long long res,
-                                                         long long mask);
-void __sanitizer_syscall_pre_impl_compat_13_sigsuspend13(long long mask);
-void __sanitizer_syscall_post_impl_compat_13_sigsuspend13(long long res,
-                                                          long long mask);
-void __sanitizer_syscall_pre_impl_compat_43_osigstack(long long nss,
-                                                      long long oss);
-void __sanitizer_syscall_post_impl_compat_43_osigstack(long long res,
-                                                       long long nss,
-                                                       long long oss);
-void __sanitizer_syscall_pre_impl_compat_43_orecvmsg(long long s, long long msg,
-                                                     long long flags);
-void __sanitizer_syscall_post_impl_compat_43_orecvmsg(long long res,
-                                                      long long s,
-                                                      long long msg,
-                                                      long long flags);
-void __sanitizer_syscall_pre_impl_compat_43_osendmsg(long long s, long long msg,
-                                                     long long flags);
-void __sanitizer_syscall_post_impl_compat_43_osendmsg(long long res,
-                                                      long long s,
-                                                      long long msg,
-                                                      long long flags);
-/* syscall 115 has been skipped */
-void __sanitizer_syscall_pre_impl_compat_50_gettimeofday(long long tp,
-                                                         long long tzp);
-void __sanitizer_syscall_post_impl_compat_50_gettimeofday(long long res,
-                                                          long long tp,
-                                                          long long tzp);
-void __sanitizer_syscall_pre_impl_compat_50_getrusage(long long who,
-                                                      long long rusage);
-void __sanitizer_syscall_post_impl_compat_50_getrusage(long long res,
-                                                       long long who,
-                                                       long long rusage);
-void __sanitizer_syscall_pre_impl_getsockopt(long long s, long long level,
-                                             long long name, long long val,
-                                             long long avalsize);
-void __sanitizer_syscall_post_impl_getsockopt(long long res, long long s,
-                                              long long level, long long name,
-                                              long long val,
-                                              long long avalsize);
-/* syscall 119 has been skipped */
-void __sanitizer_syscall_pre_impl_readv(long long fd, long long iovp,
-                                        long long iovcnt);
-void __sanitizer_syscall_post_impl_readv(long long res, long long fd,
-                                         long long iovp, long long iovcnt);
-void __sanitizer_syscall_pre_impl_writev(long long fd, long long iovp,
-                                         long long iovcnt);
-void __sanitizer_syscall_post_impl_writev(long long res, long long fd,
-                                          long long iovp, long long iovcnt);
-void __sanitizer_syscall_pre_impl_compat_50_settimeofday(long long tv,
-                                                         long long tzp);
-void __sanitizer_syscall_post_impl_compat_50_settimeofday(long long res,
-                                                          long long tv,
-                                                          long long tzp);
-void __sanitizer_syscall_pre_impl_fchown(long long fd, long long uid,
-                                         long long gid);
-void __sanitizer_syscall_post_impl_fchown(long long res, long long fd,
-                                          long long uid, long long gid);
-void __sanitizer_syscall_pre_impl_fchmod(long long fd, long long mode);
-void __sanitizer_syscall_post_impl_fchmod(long long res, long long fd,
-                                          long long mode);
-void __sanitizer_syscall_pre_impl_compat_43_orecvfrom(
-    long long s, long long buf, long long len, long long flags, long long from,
-    long long fromlenaddr);
-void __sanitizer_syscall_post_impl_compat_43_orecvfrom(
-    long long res, long long s, long long buf, long long len, long long flags,
-    long long from, long long fromlenaddr);
-void __sanitizer_syscall_pre_impl_setreuid(long long ruid, long long euid);
-void __sanitizer_syscall_post_impl_setreuid(long long res, long long ruid,
-                                            long long euid);
-void __sanitizer_syscall_pre_impl_setregid(long long rgid, long long egid);
-void __sanitizer_syscall_post_impl_setregid(long long res, long long rgid,
-                                            long long egid);
-void __sanitizer_syscall_pre_impl_rename(long long from, long long to);
-void __sanitizer_syscall_post_impl_rename(long long res, long long from,
-                                          long long to);
-void __sanitizer_syscall_pre_impl_compat_43_otruncate(long long path,
-                                                      long long length);
-void __sanitizer_syscall_post_impl_compat_43_otruncate(long long res,
-                                                       long long path,
-                                                       long long length);
-void __sanitizer_syscall_pre_impl_compat_43_oftruncate(long long fd,
-                                                       long long length);
-void __sanitizer_syscall_post_impl_compat_43_oftruncate(long long res,
-                                                        long long fd,
-                                                        long long length);
-void __sanitizer_syscall_pre_impl_flock(long long fd, long long how);
-void __sanitizer_syscall_post_impl_flock(long long res, long long fd,
-                                         long long how);
-void __sanitizer_syscall_pre_impl_mkfifo(long long path, long long mode);
-void __sanitizer_syscall_post_impl_mkfifo(long long res, long long path,
-                                          long long mode);
-void __sanitizer_syscall_pre_impl_sendto(long long s, long long buf,
-                                         long long len, long long flags,
-                                         long long to, long long tolen);
-void __sanitizer_syscall_post_impl_sendto(long long res, long long s,
-                                          long long buf, long long len,
-                                          long long flags, long long to,
-                                          long long tolen);
-void __sanitizer_syscall_pre_impl_shutdown(long long s, long long how);
-void __sanitizer_syscall_post_impl_shutdown(long long res, long long s,
-                                            long long how);
-void __sanitizer_syscall_pre_impl_socketpair(long long domain, long long type,
-                                             long long protocol, long long rsv);
-void __sanitizer_syscall_post_impl_socketpair(long long res, long long domain,
-                                              long long type,
-                                              long long protocol,
-                                              long long rsv);
-void __sanitizer_syscall_pre_impl_mkdir(long long path, long long mode);
-void __sanitizer_syscall_post_impl_mkdir(long long res, long long path,
-                                         long long mode);
-void __sanitizer_syscall_pre_impl_rmdir(long long path);
-void __sanitizer_syscall_post_impl_rmdir(long long res, long long path);
-void __sanitizer_syscall_pre_impl_compat_50_utimes(long long path,
-                                                   long long tptr);
-void __sanitizer_syscall_post_impl_compat_50_utimes(long long res,
-                                                    long long path,
-                                                    long long tptr);
-/* syscall 139 has been skipped */
-void __sanitizer_syscall_pre_impl_compat_50_adjtime(long long delta,
-                                                    long long olddelta);
-void __sanitizer_syscall_post_impl_compat_50_adjtime(long long res,
-                                                     long long delta,
-                                                     long long olddelta);
-void __sanitizer_syscall_pre_impl_compat_43_ogetpeername(long long fdes,
-                                                         long long asa,
-                                                         long long alen);
-void __sanitizer_syscall_post_impl_compat_43_ogetpeername(long long res,
-                                                          long long fdes,
-                                                          long long asa,
-                                                          long long alen);
-void __sanitizer_syscall_pre_impl_compat_43_ogethostid(void);
-void __sanitizer_syscall_post_impl_compat_43_ogethostid(long long res);
-void __sanitizer_syscall_pre_impl_compat_43_osethostid(long long hostid);
-void __sanitizer_syscall_post_impl_compat_43_osethostid(long long res,
-                                                        long long hostid);
-void __sanitizer_syscall_pre_impl_compat_43_ogetrlimit(long long which,
-                                                       long long rlp);
-void __sanitizer_syscall_post_impl_compat_43_ogetrlimit(long long res,
-                                                        long long which,
-                                                        long long rlp);
-void __sanitizer_syscall_pre_impl_compat_43_osetrlimit(long long which,
-                                                       long long rlp);
-void __sanitizer_syscall_post_impl_compat_43_osetrlimit(long long res,
-                                                        long long which,
-                                                        long long rlp);
-void __sanitizer_syscall_pre_impl_compat_43_okillpg(long long pgid,
-                                                    long long signum);
-void __sanitizer_syscall_post_impl_compat_43_okillpg(long long res,
-                                                     long long pgid,
-                                                     long long signum);
-void __sanitizer_syscall_pre_impl_setsid(void);
-void __sanitizer_syscall_post_impl_setsid(long long res);
-void __sanitizer_syscall_pre_impl_compat_50_quotactl(long long path,
-                                                     long long cmd,
-                                                     long long uid,
-                                                     long long arg);
-void __sanitizer_syscall_post_impl_compat_50_quotactl(
-    long long res, long long path, long long cmd, long long uid, long long arg);
-void __sanitizer_syscall_pre_impl_compat_43_oquota(void);
-void __sanitizer_syscall_post_impl_compat_43_oquota(long long res);
-void __sanitizer_syscall_pre_impl_compat_43_ogetsockname(long long fdec,
-                                                         long long asa,
-                                                         long long alen);
-void __sanitizer_syscall_post_impl_compat_43_ogetsockname(long long res,
-                                                          long long fdec,
-                                                          long long asa,
-                                                          long long alen);
-/* syscall 151 has been skipped */
-/* syscall 152 has been skipped */
-/* syscall 153 has been skipped */
-/* syscall 154 has been skipped */
-void __sanitizer_syscall_pre_impl_nfssvc(long long flag, long long argp);
-void __sanitizer_syscall_post_impl_nfssvc(long long res, long long flag,
-                                          long long argp);
-void __sanitizer_syscall_pre_impl_compat_43_ogetdirentries(long long fd,
-                                                           long long buf,
-                                                           long long count,
-                                                           long long basep);
-void __sanitizer_syscall_post_impl_compat_43_ogetdirentries(long long res,
-                                                            long long fd,
-                                                            long long buf,
-                                                            long long count,
-                                                            long long basep);
-void __sanitizer_syscall_pre_impl_compat_20_statfs(long long path,
-                                                   long long buf);
-void __sanitizer_syscall_post_impl_compat_20_statfs(long long res,
-                                                    long long path,
-                                                    long long buf);
-void __sanitizer_syscall_pre_impl_compat_20_fstatfs(long long fd,
-                                                    long long buf);
-void __sanitizer_syscall_post_impl_compat_20_fstatfs(long long res,
-                                                     long long fd,
-                                                     long long buf);
-/* syscall 159 has been skipped */
-/* syscall 160 has been skipped */
-void __sanitizer_syscall_pre_impl_compat_30_getfh(long long fname,
-                                                  long long fhp);
-void __sanitizer_syscall_post_impl_compat_30_getfh(long long res,
-                                                   long long fname,
-                                                   long long fhp);
-void __sanitizer_syscall_pre_impl_compat_09_ogetdomainname(long long domainname,
-                                                           long long len);
-void __sanitizer_syscall_post_impl_compat_09_ogetdomainname(
-    long long res, long long domainname, long long len);
-void __sanitizer_syscall_pre_impl_compat_09_osetdomainname(long long domainname,
-                                                           long long len);
-void __sanitizer_syscall_post_impl_compat_09_osetdomainname(
-    long long res, long long domainname, long long len);
-void __sanitizer_syscall_pre_impl_compat_09_ouname(long long name);
-void __sanitizer_syscall_post_impl_compat_09_ouname(long long res,
-                                                    long long name);
-void __sanitizer_syscall_pre_impl_sysarch(long long op, long long parms);
-void __sanitizer_syscall_post_impl_sysarch(long long res, long long op,
-                                           long long parms);
-/* syscall 166 has been skipped */
-/* syscall 167 has been skipped */
-/* syscall 168 has been skipped */
-#if !defined(_LP64)
-void __sanitizer_syscall_pre_impl_compat_10_osemsys(long long which,
-                                                    long long a2, long long a3,
-                                                    long long a4, long long a5);
-void __sanitizer_syscall_post_impl_compat_10_osemsys(long long res,
-                                                     long long which,
-                                                     long long a2, long long a3,
-                                                     long long a4,
-                                                     long long a5);
-#else
-/* syscall 169 has been skipped */
-#endif
-#if !defined(_LP64)
-void __sanitizer_syscall_pre_impl_compat_10_omsgsys(long long which,
-                                                    long long a2, long long a3,
-                                                    long long a4, long long a5,
-                                                    long long a6);
-void __sanitizer_syscall_post_impl_compat_10_omsgsys(long long res,
-                                                     long long which,
-                                                     long long a2, long long a3,
-                                                     long long a4, long long a5,
-                                                     long long a6);
-#else
-/* syscall 170 has been skipped */
-#endif
-#if !defined(_LP64)
-void __sanitizer_syscall_pre_impl_compat_10_oshmsys(long long which,
-                                                    long long a2, long long a3,
-                                                    long long a4);
-void __sanitizer_syscall_post_impl_compat_10_oshmsys(long long res,
-                                                     long long which,
-                                                     long long a2, long long a3,
-                                                     long long a4);
-#else
-/* syscall 171 has been skipped */
-#endif
-/* syscall 172 has been skipped */
-void __sanitizer_syscall_pre_impl_pread(long long fd, long long buf,
-                                        long long nbyte, long long PAD,
-                                        long long offset);
-void __sanitizer_syscall_post_impl_pread(long long res, long long fd,
-                                         long long buf, long long nbyte,
-                                         long long PAD, long long offset);
-void __sanitizer_syscall_pre_impl_pwrite(long long fd, long long buf,
-                                         long long nbyte, long long PAD,
-                                         long long offset);
-void __sanitizer_syscall_post_impl_pwrite(long long res, long long fd,
-                                          long long buf, long long nbyte,
-                                          long long PAD, long long offset);
-void __sanitizer_syscall_pre_impl_compat_30_ntp_gettime(long long ntvp);
-void __sanitizer_syscall_post_impl_compat_30_ntp_gettime(long long res,
-                                                         long long ntvp);
-#if defined(NTP) || !defined(_KERNEL_OPT)
-void __sanitizer_syscall_pre_impl_ntp_adjtime(long long tp);
-void __sanitizer_syscall_post_impl_ntp_adjtime(long long res, long long tp);
-#else
-/* syscall 176 has been skipped */
-#endif
-/* syscall 177 has been skipped */
-/* syscall 178 has been skipped */
-/* syscall 179 has been skipped */
-/* syscall 180 has been skipped */
-void __sanitizer_syscall_pre_impl_setgid(long long gid);
-void __sanitizer_syscall_post_impl_setgid(long long res, long long gid);
-void __sanitizer_syscall_pre_impl_setegid(long long egid);
-void __sanitizer_syscall_post_impl_setegid(long long res, long long egid);
-void __sanitizer_syscall_pre_impl_seteuid(long long euid);
-void __sanitizer_syscall_post_impl_seteuid(long long res, long long euid);
-void __sanitizer_syscall_pre_impl_lfs_bmapv(long long fsidp, long long blkiov,
-                                            long long blkcnt);
-void __sanitizer_syscall_post_impl_lfs_bmapv(long long res, long long fsidp,
-                                             long long blkiov,
-                                             long long blkcnt);
-void __sanitizer_syscall_pre_impl_lfs_markv(long long fsidp, long long blkiov,
-                                            long long blkcnt);
-void __sanitizer_syscall_post_impl_lfs_markv(long long res, long long fsidp,
-                                             long long blkiov,
-                                             long long blkcnt);
-void __sanitizer_syscall_pre_impl_lfs_segclean(long long fsidp,
-                                               long long segment);
-void __sanitizer_syscall_post_impl_lfs_segclean(long long res, long long fsidp,
-                                                long long segment);
-void __sanitizer_syscall_pre_impl_compat_50_lfs_segwait(long long fsidp,
-                                                        long long tv);
-void __sanitizer_syscall_post_impl_compat_50_lfs_segwait(long long res,
-                                                         long long fsidp,
-                                                         long long tv);
-void __sanitizer_syscall_pre_impl_compat_12_stat12(long long path,
-                                                   long long ub);
-void __sanitizer_syscall_post_impl_compat_12_stat12(long long res,
-                                                    long long path,
-                                                    long long ub);
-void __sanitizer_syscall_pre_impl_compat_12_fstat12(long long fd, long long sb);
-void __sanitizer_syscall_post_impl_compat_12_fstat12(long long res,
-                                                     long long fd,
-                                                     long long sb);
-void __sanitizer_syscall_pre_impl_compat_12_lstat12(long long path,
-                                                    long long ub);
-void __sanitizer_syscall_post_impl_compat_12_lstat12(long long res,
-                                                     long long path,
-                                                     long long ub);
-void __sanitizer_syscall_pre_impl_pathconf(long long path, long long name);
-void __sanitizer_syscall_post_impl_pathconf(long long res, long long path,
-                                            long long name);
-void __sanitizer_syscall_pre_impl_fpathconf(long long fd, long long name);
-void __sanitizer_syscall_post_impl_fpathconf(long long res, long long fd,
-                                             long long name);
-void __sanitizer_syscall_pre_impl_getsockopt2(long long s, long long level,
-                                              long long name, long long val,
-                                              long long avalsize);
-void __sanitizer_syscall_post_impl_getsockopt2(long long res, long long s,
-                                               long long level, long long name,
-                                               long long val,
-                                               long long avalsize);
-void __sanitizer_syscall_pre_impl_getrlimit(long long which, long long rlp);
-void __sanitizer_syscall_post_impl_getrlimit(long long res, long long which,
-                                             long long rlp);
-void __sanitizer_syscall_pre_impl_setrlimit(long long which, long long rlp);
-void __sanitizer_syscall_post_impl_setrlimit(long long res, long long which,
-                                             long long rlp);
-void __sanitizer_syscall_pre_impl_compat_12_getdirentries(long long fd,
-                                                          long long buf,
-                                                          long long count,
-                                                          long long basep);
-void __sanitizer_syscall_post_impl_compat_12_getdirentries(long long res,
-                                                           long long fd,
-                                                           long long buf,
-                                                           long long count,
-                                                           long long basep);
-void __sanitizer_syscall_pre_impl_mmap(long long addr, long long len,
-                                       long long prot, long long flags,
-                                       long long fd, long long PAD,
-                                       long long pos);
-void __sanitizer_syscall_post_impl_mmap(long long res, long long addr,
-                                        long long len, long long prot,
-                                        long long flags, long long fd,
-                                        long long PAD, long long pos);
-void __sanitizer_syscall_pre_impl___syscall(long long code, long long arg0,
-                                            long long arg1, long long arg2,
-                                            long long arg3, long long arg4,
-                                            long long arg5, long long arg6,
-                                            long long arg7);
-void __sanitizer_syscall_post_impl___syscall(long long res, long long code,
-                                             long long arg0, long long arg1,
-                                             long long arg2, long long arg3,
-                                             long long arg4, long long arg5,
-                                             long long arg6, long long arg7);
-void __sanitizer_syscall_pre_impl_lseek(long long fd, long long PAD,
-                                        long long offset, long long whence);
-void __sanitizer_syscall_post_impl_lseek(long long res, long long fd,
-                                         long long PAD, long long offset,
-                                         long long whence);
-void __sanitizer_syscall_pre_impl_truncate(long long path, long long PAD,
-                                           long long length);
-void __sanitizer_syscall_post_impl_truncate(long long res, long long path,
-                                            long long PAD, long long length);
-void __sanitizer_syscall_pre_impl_ftruncate(long long fd, long long PAD,
-                                            long long length);
-void __sanitizer_syscall_post_impl_ftruncate(long long res, long long fd,
-                                             long long PAD, long long length);
-void __sanitizer_syscall_pre_impl___sysctl(long long name, long long namelen,
-                                           long long oldv, long long oldlenp,
-                                           long long newv, long long newlen);
-void __sanitizer_syscall_post_impl___sysctl(long long res, long long name,
-                                            long long namelen, long long oldv,
-                                            long long oldlenp, long long newv,
-                                            long long newlen);
-void __sanitizer_syscall_pre_impl_mlock(long long addr, long long len);
-void __sanitizer_syscall_post_impl_mlock(long long res, long long addr,
-                                         long long len);
-void __sanitizer_syscall_pre_impl_munlock(long long addr, long long len);
-void __sanitizer_syscall_post_impl_munlock(long long res, long long addr,
-                                           long long len);
-void __sanitizer_syscall_pre_impl_undelete(long long path);
-void __sanitizer_syscall_post_impl_undelete(long long res, long long path);
-void __sanitizer_syscall_pre_impl_compat_50_futimes(long long fd,
-                                                    long long tptr);
-void __sanitizer_syscall_post_impl_compat_50_futimes(long long res,
-                                                     long long fd,
-                                                     long long tptr);
-void __sanitizer_syscall_pre_impl_getpgid(long long pid);
-void __sanitizer_syscall_post_impl_getpgid(long long res, long long pid);
-void __sanitizer_syscall_pre_impl_reboot(long long opt, long long bootstr);
-void __sanitizer_syscall_post_impl_reboot(long long res, long long opt,
-                                          long long bootstr);
-void __sanitizer_syscall_pre_impl_poll(long long fds, long long nfds,
-                                       long long timeout);
-void __sanitizer_syscall_post_impl_poll(long long res, long long fds,
-                                        long long nfds, long long timeout);
-void __sanitizer_syscall_pre_impl_afssys(long long id, long long a1,
-                                         long long a2, long long a3,
-                                         long long a4, long long a5,
-                                         long long a6);
-void __sanitizer_syscall_post_impl_afssys(long long res, long long id,
-                                          long long a1, long long a2,
-                                          long long a3, long long a4,
-                                          long long a5, long long a6);
-/* syscall 211 has been skipped */
-/* syscall 212 has been skipped */
-/* syscall 213 has been skipped */
-/* syscall 214 has been skipped */
-/* syscall 215 has been skipped */
-/* syscall 216 has been skipped */
-/* syscall 217 has been skipped */
-/* syscall 218 has been skipped */
-/* syscall 219 has been skipped */
-void __sanitizer_syscall_pre_impl_compat_14___semctl(long long semid,
-                                                     long long semnum,
-                                                     long long cmd,
-                                                     long long arg);
-void __sanitizer_syscall_post_impl_compat_14___semctl(long long res,
-                                                      long long semid,
-                                                      long long semnum,
-                                                      long long cmd,
-                                                      long long arg);
-void __sanitizer_syscall_pre_impl_semget(long long key, long long nsems,
-                                         long long semflg);
-void __sanitizer_syscall_post_impl_semget(long long res, long long key,
-                                          long long nsems, long long semflg);
-void __sanitizer_syscall_pre_impl_semop(long long semid, long long sops,
-                                        long long nsops);
-void __sanitizer_syscall_post_impl_semop(long long res, long long semid,
-                                         long long sops, long long nsops);
-void __sanitizer_syscall_pre_impl_semconfig(long long flag);
-void __sanitizer_syscall_post_impl_semconfig(long long res, long long flag);
-void __sanitizer_syscall_pre_impl_compat_14_msgctl(long long msqid,
-                                                   long long cmd,
-                                                   long long buf);
-void __sanitizer_syscall_post_impl_compat_14_msgctl(long long res,
-                                                    long long msqid,
-                                                    long long cmd,
-                                                    long long buf);
-void __sanitizer_syscall_pre_impl_msgget(long long key, long long msgflg);
-void __sanitizer_syscall_post_impl_msgget(long long res, long long key,
-                                          long long msgflg);
-void __sanitizer_syscall_pre_impl_msgsnd(long long msqid, long long msgp,
-                                         long long msgsz, long long msgflg);
-void __sanitizer_syscall_post_impl_msgsnd(long long res, long long msqid,
-                                          long long msgp, long long msgsz,
-                                          long long msgflg);
-void __sanitizer_syscall_pre_impl_msgrcv(long long msqid, long long msgp,
-                                         long long msgsz, long long msgtyp,
-                                         long long msgflg);
-void __sanitizer_syscall_post_impl_msgrcv(long long res, long long msqid,
-                                          long long msgp, long long msgsz,
-                                          long long msgtyp, long long msgflg);
-void __sanitizer_syscall_pre_impl_shmat(long long shmid, long long shmaddr,
-                                        long long shmflg);
-void __sanitizer_syscall_post_impl_shmat(long long res, long long shmid,
-                                         long long shmaddr, long long shmflg);
-void __sanitizer_syscall_pre_impl_compat_14_shmctl(long long shmid,
-                                                   long long cmd,
-                                                   long long buf);
-void __sanitizer_syscall_post_impl_compat_14_shmctl(long long res,
-                                                    long long shmid,
-                                                    long long cmd,
-                                                    long long buf);
-void __sanitizer_syscall_pre_impl_shmdt(long long shmaddr);
-void __sanitizer_syscall_post_impl_shmdt(long long res, long long shmaddr);
-void __sanitizer_syscall_pre_impl_shmget(long long key, long long size,
-                                         long long shmflg);
-void __sanitizer_syscall_post_impl_shmget(long long res, long long key,
-                                          long long size, long long shmflg);
-void __sanitizer_syscall_pre_impl_compat_50_clock_gettime(long long clock_id,
-                                                          long long tp);
-void __sanitizer_syscall_post_impl_compat_50_clock_gettime(long long res,
-                                                           long long clock_id,
-                                                           long long tp);
-void __sanitizer_syscall_pre_impl_compat_50_clock_settime(long long clock_id,
-                                                          long long tp);
-void __sanitizer_syscall_post_impl_compat_50_clock_settime(long long res,
-                                                           long long clock_id,
-                                                           long long tp);
-void __sanitizer_syscall_pre_impl_compat_50_clock_getres(long long clock_id,
-                                                         long long tp);
-void __sanitizer_syscall_post_impl_compat_50_clock_getres(long long res,
-                                                          long long clock_id,
-                                                          long long tp);
-void __sanitizer_syscall_pre_impl_timer_create(long long clock_id,
-                                               long long evp,
-                                               long long timerid);
-void __sanitizer_syscall_post_impl_timer_create(long long res,
-                                                long long clock_id,
-                                                long long evp,
-                                                long long timerid);
-void __sanitizer_syscall_pre_impl_timer_delete(long long timerid);
-void __sanitizer_syscall_post_impl_timer_delete(long long res,
-                                                long long timerid);
-void __sanitizer_syscall_pre_impl_compat_50_timer_settime(long long timerid,
-                                                          long long flags,
-                                                          long long value,
-                                                          long long ovalue);
-void __sanitizer_syscall_post_impl_compat_50_timer_settime(long long res,
-                                                           long long timerid,
-                                                           long long flags,
-                                                           long long value,
-                                                           long long ovalue);
-void __sanitizer_syscall_pre_impl_compat_50_timer_gettime(long long timerid,
-                                                          long long value);
-void __sanitizer_syscall_post_impl_compat_50_timer_gettime(long long res,
-                                                           long long timerid,
-                                                           long long value);
-void __sanitizer_syscall_pre_impl_timer_getoverrun(long long timerid);
-void __sanitizer_syscall_post_impl_timer_getoverrun(long long res,
-                                                    long long timerid);
-void __sanitizer_syscall_pre_impl_compat_50_nanosleep(long long rqtp,
-                                                      long long rmtp);
-void __sanitizer_syscall_post_impl_compat_50_nanosleep(long long res,
-                                                       long long rqtp,
-                                                       long long rmtp);
-void __sanitizer_syscall_pre_impl_fdatasync(long long fd);
-void __sanitizer_syscall_post_impl_fdatasync(long long res, long long fd);
-void __sanitizer_syscall_pre_impl_mlockall(long long flags);
-void __sanitizer_syscall_post_impl_mlockall(long long res, long long flags);
-void __sanitizer_syscall_pre_impl_munlockall(void);
-void __sanitizer_syscall_post_impl_munlockall(long long res);
-void __sanitizer_syscall_pre_impl_compat_50___sigtimedwait(long long set,
-                                                           long long info,
-                                                           long long timeout);
-void __sanitizer_syscall_post_impl_compat_50___sigtimedwait(long long res,
-                                                            long long set,
-                                                            long long info,
-                                                            long long timeout);
-void __sanitizer_syscall_pre_impl_sigqueueinfo(long long pid, long long info);
-void __sanitizer_syscall_post_impl_sigqueueinfo(long long res, long long pid,
-                                                long long info);
-void __sanitizer_syscall_pre_impl_modctl(long long cmd, long long arg);
-void __sanitizer_syscall_post_impl_modctl(long long res, long long cmd,
-                                          long long arg);
-void __sanitizer_syscall_pre_impl__ksem_init(long long value, long long idp);
-void __sanitizer_syscall_post_impl__ksem_init(long long res, long long value,
-                                              long long idp);
-void __sanitizer_syscall_pre_impl__ksem_open(long long name, long long oflag,
-                                             long long mode, long long value,
-                                             long long idp);
-void __sanitizer_syscall_post_impl__ksem_open(long long res, long long name,
-                                              long long oflag, long long mode,
-                                              long long value, long long idp);
-void __sanitizer_syscall_pre_impl__ksem_unlink(long long name);
-void __sanitizer_syscall_post_impl__ksem_unlink(long long res, long long name);
-void __sanitizer_syscall_pre_impl__ksem_close(long long id);
-void __sanitizer_syscall_post_impl__ksem_close(long long res, long long id);
-void __sanitizer_syscall_pre_impl__ksem_post(long long id);
-void __sanitizer_syscall_post_impl__ksem_post(long long res, long long id);
-void __sanitizer_syscall_pre_impl__ksem_wait(long long id);
-void __sanitizer_syscall_post_impl__ksem_wait(long long res, long long id);
-void __sanitizer_syscall_pre_impl__ksem_trywait(long long id);
-void __sanitizer_syscall_post_impl__ksem_trywait(long long res, long long id);
-void __sanitizer_syscall_pre_impl__ksem_getvalue(long long id, long long value);
-void __sanitizer_syscall_post_impl__ksem_getvalue(long long res, long long id,
-                                                  long long value);
-void __sanitizer_syscall_pre_impl__ksem_destroy(long long id);
-void __sanitizer_syscall_post_impl__ksem_destroy(long long res, long long id);
-void __sanitizer_syscall_pre_impl__ksem_timedwait(long long id,
-                                                  long long abstime);
-void __sanitizer_syscall_post_impl__ksem_timedwait(long long res, long long id,
-                                                   long long abstime);
-void __sanitizer_syscall_pre_impl_mq_open(long long name, long long oflag,
-                                          long long mode, long long attr);
-void __sanitizer_syscall_post_impl_mq_open(long long res, long long name,
-                                           long long oflag, long long mode,
-                                           long long attr);
-void __sanitizer_syscall_pre_impl_mq_close(long long mqdes);
-void __sanitizer_syscall_post_impl_mq_close(long long res, long long mqdes);
-void __sanitizer_syscall_pre_impl_mq_unlink(long long name);
-void __sanitizer_syscall_post_impl_mq_unlink(long long res, long long name);
-void __sanitizer_syscall_pre_impl_mq_getattr(long long mqdes, long long mqstat);
-void __sanitizer_syscall_post_impl_mq_getattr(long long res, long long mqdes,
-                                              long long mqstat);
-void __sanitizer_syscall_pre_impl_mq_setattr(long long mqdes, long long mqstat,
-                                             long long omqstat);
-void __sanitizer_syscall_post_impl_mq_setattr(long long res, long long mqdes,
-                                              long long mqstat,
-                                              long long omqstat);
-void __sanitizer_syscall_pre_impl_mq_notify(long long mqdes,
-                                            long long notification);
-void __sanitizer_syscall_post_impl_mq_notify(long long res, long long mqdes,
-                                             long long notification);
-void __sanitizer_syscall_pre_impl_mq_send(long long mqdes, long long msg_ptr,
-                                          long long msg_len,
-                                          long long msg_prio);
-void __sanitizer_syscall_post_impl_mq_send(long long res, long long mqdes,
-                                           long long msg_ptr, long long msg_len,
-                                           long long msg_prio);
-void __sanitizer_syscall_pre_impl_mq_receive(long long mqdes, long long msg_ptr,
-                                             long long msg_len,
-                                             long long msg_prio);
-void __sanitizer_syscall_post_impl_mq_receive(long long res, long long mqdes,
-                                              long long msg_ptr,
-                                              long long msg_len,
-                                              long long msg_prio);
-void __sanitizer_syscall_pre_impl_compat_50_mq_timedsend(long long mqdes,
-                                                         long long msg_ptr,
-                                                         long long msg_len,
-                                                         long long msg_prio,
-                                                         long long abs_timeout);
-void __sanitizer_syscall_post_impl_compat_50_mq_timedsend(
-    long long res, long long mqdes, long long msg_ptr, long long msg_len,
-    long long msg_prio, long long abs_timeout);
-void __sanitizer_syscall_pre_impl_compat_50_mq_timedreceive(
-    long long mqdes, long long msg_ptr, long long msg_len, long long msg_prio,
-    long long abs_timeout);
-void __sanitizer_syscall_post_impl_compat_50_mq_timedreceive(
-    long long res, long long mqdes, long long msg_ptr, long long msg_len,
-    long long msg_prio, long long abs_timeout);
-/* syscall 267 has been skipped */
-/* syscall 268 has been skipped */
-/* syscall 269 has been skipped */
-void __sanitizer_syscall_pre_impl___posix_rename(long long from, long long to);
-void __sanitizer_syscall_post_impl___posix_rename(long long res, long long from,
-                                                  long long to);
-void __sanitizer_syscall_pre_impl_swapctl(long long cmd, long long arg,
-                                          long long misc);
-void __sanitizer_syscall_post_impl_swapctl(long long res, long long cmd,
-                                           long long arg, long long misc);
-void __sanitizer_syscall_pre_impl_compat_30_getdents(long long fd,
-                                                     long long buf,
-                                                     long long count);
-void __sanitizer_syscall_post_impl_compat_30_getdents(long long res,
-                                                      long long fd,
-                                                      long long buf,
-                                                      long long count);
-void __sanitizer_syscall_pre_impl_minherit(long long addr, long long len,
-                                           long long inherit);
-void __sanitizer_syscall_post_impl_minherit(long long res, long long addr,
-                                            long long len, long long inherit);
-void __sanitizer_syscall_pre_impl_lchmod(long long path, long long mode);
-void __sanitizer_syscall_post_impl_lchmod(long long res, long long path,
-                                          long long mode);
-void __sanitizer_syscall_pre_impl_lchown(long long path, long long uid,
-                                         long long gid);
-void __sanitizer_syscall_post_impl_lchown(long long res, long long path,
-                                          long long uid, long long gid);
-void __sanitizer_syscall_pre_impl_compat_50_lutimes(long long path,
-                                                    long long tptr);
-void __sanitizer_syscall_post_impl_compat_50_lutimes(long long res,
-                                                     long long path,
-                                                     long long tptr);
-void __sanitizer_syscall_pre_impl___msync13(long long addr, long long len,
-                                            long long flags);
-void __sanitizer_syscall_post_impl___msync13(long long res, long long addr,
-                                             long long len, long long flags);
-void __sanitizer_syscall_pre_impl_compat_30___stat13(long long path,
-                                                     long long ub);
-void __sanitizer_syscall_post_impl_compat_30___stat13(long long res,
-                                                      long long path,
-                                                      long long ub);
-void __sanitizer_syscall_pre_impl_compat_30___fstat13(long long fd,
-                                                      long long sb);
-void __sanitizer_syscall_post_impl_compat_30___fstat13(long long res,
-                                                       long long fd,
-                                                       long long sb);
-void __sanitizer_syscall_pre_impl_compat_30___lstat13(long long path,
-                                                      long long ub);
-void __sanitizer_syscall_post_impl_compat_30___lstat13(long long res,
-                                                       long long path,
-                                                       long long ub);
-void __sanitizer_syscall_pre_impl___sigaltstack14(long long nss, long long oss);
-void __sanitizer_syscall_post_impl___sigaltstack14(long long res, long long nss,
-                                                   long long oss);
-void __sanitizer_syscall_pre_impl___vfork14(void);
-void __sanitizer_syscall_post_impl___vfork14(long long res);
-void __sanitizer_syscall_pre_impl___posix_chown(long long path, long long uid,
-                                                long long gid);
-void __sanitizer_syscall_post_impl___posix_chown(long long res, long long path,
-                                                 long long uid, long long gid);
-void __sanitizer_syscall_pre_impl___posix_fchown(long long fd, long long uid,
-                                                 long long gid);
-void __sanitizer_syscall_post_impl___posix_fchown(long long res, long long fd,
-                                                  long long uid, long long gid);
-void __sanitizer_syscall_pre_impl___posix_lchown(long long path, long long uid,
-                                                 long long gid);
-void __sanitizer_syscall_post_impl___posix_lchown(long long res, long long path,
-                                                  long long uid, long long gid);
-void __sanitizer_syscall_pre_impl_getsid(long long pid);
-void __sanitizer_syscall_post_impl_getsid(long long res, long long pid);
-void __sanitizer_syscall_pre_impl___clone(long long flags, long long stack);
-void __sanitizer_syscall_post_impl___clone(long long res, long long flags,
-                                           long long stack);
-void __sanitizer_syscall_pre_impl_fktrace(long long fd, long long ops,
-                                          long long facs, long long pid);
-void __sanitizer_syscall_post_impl_fktrace(long long res, long long fd,
-                                           long long ops, long long facs,
-                                           long long pid);
-void __sanitizer_syscall_pre_impl_preadv(long long fd, long long iovp,
-                                         long long iovcnt, long long PAD,
-                                         long long offset);
-void __sanitizer_syscall_post_impl_preadv(long long res, long long fd,
-                                          long long iovp, long long iovcnt,
-                                          long long PAD, long long offset);
-void __sanitizer_syscall_pre_impl_pwritev(long long fd, long long iovp,
-                                          long long iovcnt, long long PAD,
-                                          long long offset);
-void __sanitizer_syscall_post_impl_pwritev(long long res, long long fd,
-                                           long long iovp, long long iovcnt,
-                                           long long PAD, long long offset);
-void __sanitizer_syscall_pre_impl_compat_16___sigaction14(long long signum,
-                                                          long long nsa,
-                                                          long long osa);
-void __sanitizer_syscall_post_impl_compat_16___sigaction14(long long res,
-                                                           long long signum,
-                                                           long long nsa,
-                                                           long long osa);
-void __sanitizer_syscall_pre_impl___sigpending14(long long set);
-void __sanitizer_syscall_post_impl___sigpending14(long long res, long long set);
-void __sanitizer_syscall_pre_impl___sigprocmask14(long long how, long long set,
-                                                  long long oset);
-void __sanitizer_syscall_post_impl___sigprocmask14(long long res, long long how,
-                                                   long long set,
-                                                   long long oset);
-void __sanitizer_syscall_pre_impl___sigsuspend14(long long set);
-void __sanitizer_syscall_post_impl___sigsuspend14(long long res, long long set);
-void __sanitizer_syscall_pre_impl_compat_16___sigreturn14(long long sigcntxp);
-void __sanitizer_syscall_post_impl_compat_16___sigreturn14(long long res,
-                                                           long long sigcntxp);
-void __sanitizer_syscall_pre_impl___getcwd(long long bufp, long long length);
-void __sanitizer_syscall_post_impl___getcwd(long long res, long long bufp,
-                                            long long length);
-void __sanitizer_syscall_pre_impl_fchroot(long long fd);
-void __sanitizer_syscall_post_impl_fchroot(long long res, long long fd);
-void __sanitizer_syscall_pre_impl_compat_30_fhopen(long long fhp,
-                                                   long long flags);
-void __sanitizer_syscall_post_impl_compat_30_fhopen(long long res,
-                                                    long long fhp,
-                                                    long long flags);
-void __sanitizer_syscall_pre_impl_compat_30_fhstat(long long fhp, long long sb);
-void __sanitizer_syscall_post_impl_compat_30_fhstat(long long res,
-                                                    long long fhp,
-                                                    long long sb);
-void __sanitizer_syscall_pre_impl_compat_20_fhstatfs(long long fhp,
-                                                     long long buf);
-void __sanitizer_syscall_post_impl_compat_20_fhstatfs(long long res,
-                                                      long long fhp,
-                                                      long long buf);
-void __sanitizer_syscall_pre_impl_compat_50_____semctl13(long long semid,
-                                                         long long semnum,
-                                                         long long cmd,
-                                                         long long arg);
-void __sanitizer_syscall_post_impl_compat_50_____semctl13(long long res,
-                                                          long long semid,
-                                                          long long semnum,
-                                                          long long cmd,
-                                                          long long arg);
-void __sanitizer_syscall_pre_impl_compat_50___msgctl13(long long msqid,
-                                                       long long cmd,
-                                                       long long buf);
-void __sanitizer_syscall_post_impl_compat_50___msgctl13(long long res,
-                                                        long long msqid,
-                                                        long long cmd,
-                                                        long long buf);
-void __sanitizer_syscall_pre_impl_compat_50___shmctl13(long long shmid,
-                                                       long long cmd,
-                                                       long long buf);
-void __sanitizer_syscall_post_impl_compat_50___shmctl13(long long res,
-                                                        long long shmid,
-                                                        long long cmd,
-                                                        long long buf);
-void __sanitizer_syscall_pre_impl_lchflags(long long path, long long flags);
-void __sanitizer_syscall_post_impl_lchflags(long long res, long long path,
-                                            long long flags);
-void __sanitizer_syscall_pre_impl_issetugid(void);
-void __sanitizer_syscall_post_impl_issetugid(long long res);
-void __sanitizer_syscall_pre_impl_utrace(long long label, long long addr,
-                                         long long len);
-void __sanitizer_syscall_post_impl_utrace(long long res, long long label,
-                                          long long addr, long long len);
-void __sanitizer_syscall_pre_impl_getcontext(long long ucp);
-void __sanitizer_syscall_post_impl_getcontext(long long res, long long ucp);
-void __sanitizer_syscall_pre_impl_setcontext(long long ucp);
-void __sanitizer_syscall_post_impl_setcontext(long long res, long long ucp);
-void __sanitizer_syscall_pre_impl__lwp_create(long long ucp, long long flags,
-                                              long long new_lwp);
-void __sanitizer_syscall_post_impl__lwp_create(long long res, long long ucp,
-                                               long long flags,
-                                               long long new_lwp);
-void __sanitizer_syscall_pre_impl__lwp_exit(void);
-void __sanitizer_syscall_post_impl__lwp_exit(long long res);
-void __sanitizer_syscall_pre_impl__lwp_self(void);
-void __sanitizer_syscall_post_impl__lwp_self(long long res);
-void __sanitizer_syscall_pre_impl__lwp_wait(long long wait_for,
-                                            long long departed);
-void __sanitizer_syscall_post_impl__lwp_wait(long long res, long long wait_for,
-                                             long long departed);
-void __sanitizer_syscall_pre_impl__lwp_suspend(long long target);
-void __sanitizer_syscall_post_impl__lwp_suspend(long long res,
-                                                long long target);
-void __sanitizer_syscall_pre_impl__lwp_continue(long long target);
-void __sanitizer_syscall_post_impl__lwp_continue(long long res,
-                                                 long long target);
-void __sanitizer_syscall_pre_impl__lwp_wakeup(long long target);
-void __sanitizer_syscall_post_impl__lwp_wakeup(long long res, long long target);
-void __sanitizer_syscall_pre_impl__lwp_getprivate(void);
-void __sanitizer_syscall_post_impl__lwp_getprivate(long long res);
-void __sanitizer_syscall_pre_impl__lwp_setprivate(long long ptr);
-void __sanitizer_syscall_post_impl__lwp_setprivate(long long res,
-                                                   long long ptr);
-void __sanitizer_syscall_pre_impl__lwp_kill(long long target, long long signo);
-void __sanitizer_syscall_post_impl__lwp_kill(long long res, long long target,
-                                             long long signo);
-void __sanitizer_syscall_pre_impl__lwp_detach(long long target);
-void __sanitizer_syscall_post_impl__lwp_detach(long long res, long long target);
-void __sanitizer_syscall_pre_impl_compat_50__lwp_park(long long ts,
-                                                      long long unpark,
-                                                      long long hint,
-                                                      long long unparkhint);
-void __sanitizer_syscall_post_impl_compat_50__lwp_park(long long res,
-                                                       long long ts,
-                                                       long long unpark,
-                                                       long long hint,
-                                                       long long unparkhint);
-void __sanitizer_syscall_pre_impl__lwp_unpark(long long target, long long hint);
-void __sanitizer_syscall_post_impl__lwp_unpark(long long res, long long target,
-                                               long long hint);
-void __sanitizer_syscall_pre_impl__lwp_unpark_all(long long targets,
-                                                  long long ntargets,
-                                                  long long hint);
-void __sanitizer_syscall_post_impl__lwp_unpark_all(long long res,
-                                                   long long targets,
-                                                   long long ntargets,
-                                                   long long hint);
-void __sanitizer_syscall_pre_impl__lwp_setname(long long target,
-                                               long long name);
-void __sanitizer_syscall_post_impl__lwp_setname(long long res, long long target,
-                                                long long name);
-void __sanitizer_syscall_pre_impl__lwp_getname(long long target, long long name,
-                                               long long len);
-void __sanitizer_syscall_post_impl__lwp_getname(long long res, long long target,
-                                                long long name, long long len);
-void __sanitizer_syscall_pre_impl__lwp_ctl(long long features,
-                                           long long address);
-void __sanitizer_syscall_post_impl__lwp_ctl(long long res, long long features,
-                                            long long address);
-/* syscall 326 has been skipped */
-/* syscall 327 has been skipped */
-/* syscall 328 has been skipped */
-/* syscall 329 has been skipped */
-void __sanitizer_syscall_pre_impl_compat_60_sa_register(
-    long long newv, long long oldv, long long flags,
-    long long stackinfo_offset);
-void __sanitizer_syscall_post_impl_compat_60_sa_register(
-    long long res, long long newv, long long oldv, long long flags,
-    long long stackinfo_offset);
-void __sanitizer_syscall_pre_impl_compat_60_sa_stacks(long long num,
-                                                      long long stacks);
-void __sanitizer_syscall_post_impl_compat_60_sa_stacks(long long res,
-                                                       long long num,
-                                                       long long stacks);
-void __sanitizer_syscall_pre_impl_compat_60_sa_enable(void);
-void __sanitizer_syscall_post_impl_compat_60_sa_enable(long long res);
-void __sanitizer_syscall_pre_impl_compat_60_sa_setconcurrency(
-    long long concurrency);
-void __sanitizer_syscall_post_impl_compat_60_sa_setconcurrency(
-    long long res, long long concurrency);
-void __sanitizer_syscall_pre_impl_compat_60_sa_yield(void);
-void __sanitizer_syscall_post_impl_compat_60_sa_yield(long long res);
-void __sanitizer_syscall_pre_impl_compat_60_sa_preempt(long long sa_id);
-void __sanitizer_syscall_post_impl_compat_60_sa_preempt(long long res,
-                                                        long long sa_id);
-/* syscall 336 has been skipped */
-/* syscall 337 has been skipped */
-/* syscall 338 has been skipped */
-/* syscall 339 has been skipped */
-void __sanitizer_syscall_pre_impl___sigaction_sigtramp(long long signum,
-                                                       long long nsa,
-                                                       long long osa,
-                                                       long long tramp,
-                                                       long long vers);
-void __sanitizer_syscall_post_impl___sigaction_sigtramp(
-    long long res, long long signum, long long nsa, long long osa,
-    long long tramp, long long vers);
-/* syscall 341 has been skipped */
-/* syscall 342 has been skipped */
-void __sanitizer_syscall_pre_impl_rasctl(long long addr, long long len,
-                                         long long op);
-void __sanitizer_syscall_post_impl_rasctl(long long res, long long addr,
-                                          long long len, long long op);
-void __sanitizer_syscall_pre_impl_kqueue(void);
-void __sanitizer_syscall_post_impl_kqueue(long long res);
-void __sanitizer_syscall_pre_impl_compat_50_kevent(
-    long long fd, long long changelist, long long nchanges, long long eventlist,
-    long long nevents, long long timeout);
-void __sanitizer_syscall_post_impl_compat_50_kevent(
-    long long res, long long fd, long long changelist, long long nchanges,
-    long long eventlist, long long nevents, long long timeout);
-void __sanitizer_syscall_pre_impl__sched_setparam(long long pid, long long lid,
-                                                  long long policy,
-                                                  long long params);
-void __sanitizer_syscall_post_impl__sched_setparam(long long res, long long pid,
-                                                   long long lid,
-                                                   long long policy,
-                                                   long long params);
-void __sanitizer_syscall_pre_impl__sched_getparam(long long pid, long long lid,
-                                                  long long policy,
-                                                  long long params);
-void __sanitizer_syscall_post_impl__sched_getparam(long long res, long long pid,
-                                                   long long lid,
-                                                   long long policy,
-                                                   long long params);
-void __sanitizer_syscall_pre_impl__sched_setaffinity(long long pid,
-                                                     long long lid,
-                                                     long long size,
-                                                     long long cpuset);
-void __sanitizer_syscall_post_impl__sched_setaffinity(long long res,
-                                                      long long pid,
-                                                      long long lid,
-                                                      long long size,
-                                                      long long cpuset);
-void __sanitizer_syscall_pre_impl__sched_getaffinity(long long pid,
-                                                     long long lid,
-                                                     long long size,
-                                                     long long cpuset);
-void __sanitizer_syscall_post_impl__sched_getaffinity(long long res,
-                                                      long long pid,
-                                                      long long lid,
-                                                      long long size,
-                                                      long long cpuset);
-void __sanitizer_syscall_pre_impl_sched_yield(void);
-void __sanitizer_syscall_post_impl_sched_yield(long long res);
-void __sanitizer_syscall_pre_impl__sched_protect(long long priority);
-void __sanitizer_syscall_post_impl__sched_protect(long long res,
-                                                  long long priority);
-/* syscall 352 has been skipped */
-/* syscall 353 has been skipped */
-void __sanitizer_syscall_pre_impl_fsync_range(long long fd, long long flags,
-                                              long long start,
-                                              long long length);
-void __sanitizer_syscall_post_impl_fsync_range(long long res, long long fd,
-                                               long long flags, long long start,
-                                               long long length);
-void __sanitizer_syscall_pre_impl_uuidgen(long long store, long long count);
-void __sanitizer_syscall_post_impl_uuidgen(long long res, long long store,
-                                           long long count);
-void __sanitizer_syscall_pre_impl_compat_90_getvfsstat(long long buf,
-                                                       long long bufsize,
-                                                       long long flags);
-void __sanitizer_syscall_post_impl_compat_90_getvfsstat(long long res,
-                                                        long long buf,
-                                                        long long bufsize,
-                                                        long long flags);
-void __sanitizer_syscall_pre_impl_compat_90_statvfs1(long long path,
-                                                     long long buf,
-                                                     long long flags);
-void __sanitizer_syscall_post_impl_compat_90_statvfs1(long long res,
-                                                      long long path,
-                                                      long long buf,
-                                                      long long flags);
-void __sanitizer_syscall_pre_impl_compat_90_fstatvfs1(long long fd,
-                                                      long long buf,
-                                                      long long flags);
-void __sanitizer_syscall_post_impl_compat_90_fstatvfs1(long long res,
-                                                       long long fd,
-                                                       long long buf,
-                                                       long long flags);
-void __sanitizer_syscall_pre_impl_compat_30_fhstatvfs1(long long fhp,
-                                                       long long buf,
-                                                       long long flags);
-void __sanitizer_syscall_post_impl_compat_30_fhstatvfs1(long long res,
-                                                        long long fhp,
-                                                        long long buf,
-                                                        long long flags);
-void __sanitizer_syscall_pre_impl_extattrctl(long long path, long long cmd,
-                                             long long filename,
-                                             long long attrnamespace,
-                                             long long attrname);
-void __sanitizer_syscall_post_impl_extattrctl(long long res, long long path,
-                                              long long cmd, long long filename,
-                                              long long attrnamespace,
-                                              long long attrname);
-void __sanitizer_syscall_pre_impl_extattr_set_file(long long path,
-                                                   long long attrnamespace,
-                                                   long long attrname,
-                                                   long long data,
-                                                   long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_set_file(
-    long long res, long long path, long long attrnamespace, long long attrname,
-    long long data, long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_get_file(long long path,
-                                                   long long attrnamespace,
-                                                   long long attrname,
-                                                   long long data,
-                                                   long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_get_file(
-    long long res, long long path, long long attrnamespace, long long attrname,
-    long long data, long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_delete_file(long long path,
-                                                      long long attrnamespace,
-                                                      long long attrname);
-void __sanitizer_syscall_post_impl_extattr_delete_file(long long res,
-                                                       long long path,
-                                                       long long attrnamespace,
-                                                       long long attrname);
-void __sanitizer_syscall_pre_impl_extattr_set_fd(long long fd,
-                                                 long long attrnamespace,
-                                                 long long attrname,
-                                                 long long data,
-                                                 long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_set_fd(long long res, long long fd,
-                                                  long long attrnamespace,
-                                                  long long attrname,
-                                                  long long data,
-                                                  long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_get_fd(long long fd,
-                                                 long long attrnamespace,
-                                                 long long attrname,
-                                                 long long data,
-                                                 long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_get_fd(long long res, long long fd,
-                                                  long long attrnamespace,
-                                                  long long attrname,
-                                                  long long data,
-                                                  long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_delete_fd(long long fd,
-                                                    long long attrnamespace,
-                                                    long long attrname);
-void __sanitizer_syscall_post_impl_extattr_delete_fd(long long res,
-                                                     long long fd,
-                                                     long long attrnamespace,
-                                                     long long attrname);
-void __sanitizer_syscall_pre_impl_extattr_set_link(long long path,
-                                                   long long attrnamespace,
-                                                   long long attrname,
-                                                   long long data,
-                                                   long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_set_link(
-    long long res, long long path, long long attrnamespace, long long attrname,
-    long long data, long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_get_link(long long path,
-                                                   long long attrnamespace,
-                                                   long long attrname,
-                                                   long long data,
-                                                   long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_get_link(
-    long long res, long long path, long long attrnamespace, long long attrname,
-    long long data, long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_delete_link(long long path,
-                                                      long long attrnamespace,
-                                                      long long attrname);
-void __sanitizer_syscall_post_impl_extattr_delete_link(long long res,
-                                                       long long path,
-                                                       long long attrnamespace,
-                                                       long long attrname);
-void __sanitizer_syscall_pre_impl_extattr_list_fd(long long fd,
-                                                  long long attrnamespace,
-                                                  long long data,
-                                                  long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_list_fd(long long res, long long fd,
-                                                   long long attrnamespace,
-                                                   long long data,
-                                                   long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_list_file(long long path,
-                                                    long long attrnamespace,
-                                                    long long data,
-                                                    long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_list_file(long long res,
-                                                     long long path,
-                                                     long long attrnamespace,
-                                                     long long data,
-                                                     long long nbytes);
-void __sanitizer_syscall_pre_impl_extattr_list_link(long long path,
-                                                    long long attrnamespace,
-                                                    long long data,
-                                                    long long nbytes);
-void __sanitizer_syscall_post_impl_extattr_list_link(long long res,
-                                                     long long path,
-                                                     long long attrnamespace,
-                                                     long long data,
-                                                     long long nbytes);
-void __sanitizer_syscall_pre_impl_compat_50_pselect(long long nd, long long in,
-                                                    long long ou, long long ex,
-                                                    long long ts,
-                                                    long long mask);
-void __sanitizer_syscall_post_impl_compat_50_pselect(long long res,
-                                                     long long nd, long long in,
-                                                     long long ou, long long ex,
-                                                     long long ts,
-                                                     long long mask);
-void __sanitizer_syscall_pre_impl_compat_50_pollts(long long fds,
-                                                   long long nfds, long long ts,
-                                                   long long mask);
-void __sanitizer_syscall_post_impl_compat_50_pollts(
-    long long res, long long fds, long long nfds, long long ts, long long mask);
-void __sanitizer_syscall_pre_impl_setxattr(long long path, long long name,
-                                           long long value, long long size,
-                                           long long flags);
-void __sanitizer_syscall_post_impl_setxattr(long long res, long long path,
-                                            long long name, long long value,
-                                            long long size, long long flags);
-void __sanitizer_syscall_pre_impl_lsetxattr(long long path, long long name,
-                                            long long value, long long size,
-                                            long long flags);
-void __sanitizer_syscall_post_impl_lsetxattr(long long res, long long path,
-                                             long long name, long long value,
-                                             long long size, long long flags);
-void __sanitizer_syscall_pre_impl_fsetxattr(long long fd, long long name,
-                                            long long value, long long size,
-                                            long long flags);
-void __sanitizer_syscall_post_impl_fsetxattr(long long res, long long fd,
-                                             long long name, long long value,
-                                             long long size, long long flags);
-void __sanitizer_syscall_pre_impl_getxattr(long long path, long long name,
-                                           long long value, long long size);
-void __sanitizer_syscall_post_impl_getxattr(long long res, long long path,
-                                            long long name, long long value,
-                                            long long size);
-void __sanitizer_syscall_pre_impl_lgetxattr(long long path, long long name,
-                                            long long value, long long size);
-void __sanitizer_syscall_post_impl_lgetxattr(long long res, long long path,
-                                             long long name, long long value,
-                                             long long size);
-void __sanitizer_syscall_pre_impl_fgetxattr(long long fd, long long name,
-                                            long long value, long long size);
-void __sanitizer_syscall_post_impl_fgetxattr(long long res, long long fd,
-                                             long long name, long long value,
-                                             long long size);
-void __sanitizer_syscall_pre_impl_listxattr(long long path, long long list,
-                                            long long size);
-void __sanitizer_syscall_post_impl_listxattr(long long res, long long path,
-                                             long long list, long long size);
-void __sanitizer_syscall_pre_impl_llistxattr(long long path, long long list,
-                                             long long size);
-void __sanitizer_syscall_post_impl_llistxattr(long long res, long long path,
-                                              long long list, long long size);
-void __sanitizer_syscall_pre_impl_flistxattr(long long fd, long long list,
-                                             long long size);
-void __sanitizer_syscall_post_impl_flistxattr(long long res, long long fd,
-                                              long long list, long long size);
-void __sanitizer_syscall_pre_impl_removexattr(long long path, long long name);
-void __sanitizer_syscall_post_impl_removexattr(long long res, long long path,
-                                               long long name);
-void __sanitizer_syscall_pre_impl_lremovexattr(long long path, long long name);
-void __sanitizer_syscall_post_impl_lremovexattr(long long res, long long path,
-                                                long long name);
-void __sanitizer_syscall_pre_impl_fremovexattr(long long fd, long long name);
-void __sanitizer_syscall_post_impl_fremovexattr(long long res, long long fd,
-                                                long long name);
-void __sanitizer_syscall_pre_impl_compat_50___stat30(long long path,
-                                                     long long ub);
-void __sanitizer_syscall_post_impl_compat_50___stat30(long long res,
-                                                      long long path,
-                                                      long long ub);
-void __sanitizer_syscall_pre_impl_compat_50___fstat30(long long fd,
-                                                      long long sb);
-void __sanitizer_syscall_post_impl_compat_50___fstat30(long long res,
-                                                       long long fd,
-                                                       long long sb);
-void __sanitizer_syscall_pre_impl_compat_50___lstat30(long long path,
-                                                      long long ub);
-void __sanitizer_syscall_post_impl_compat_50___lstat30(long long res,
-                                                       long long path,
-                                                       long long ub);
-void __sanitizer_syscall_pre_impl___getdents30(long long fd, long long buf,
-                                               long long count);
-void __sanitizer_syscall_post_impl___getdents30(long long res, long long fd,
-                                                long long buf, long long count);
-void __sanitizer_syscall_pre_impl_posix_fadvise(long long);
-void __sanitizer_syscall_post_impl_posix_fadvise(long long res, long long);
-void __sanitizer_syscall_pre_impl_compat_30___fhstat30(long long fhp,
-                                                       long long sb);
-void __sanitizer_syscall_post_impl_compat_30___fhstat30(long long res,
-                                                        long long fhp,
-                                                        long long sb);
-void __sanitizer_syscall_pre_impl_compat_50___ntp_gettime30(long long ntvp);
-void __sanitizer_syscall_post_impl_compat_50___ntp_gettime30(long long res,
-                                                             long long ntvp);
-void __sanitizer_syscall_pre_impl___socket30(long long domain, long long type,
-                                             long long protocol);
-void __sanitizer_syscall_post_impl___socket30(long long res, long long domain,
-                                              long long type,
-                                              long long protocol);
-void __sanitizer_syscall_pre_impl___getfh30(long long fname, long long fhp,
-                                            long long fh_size);
-void __sanitizer_syscall_post_impl___getfh30(long long res, long long fname,
-                                             long long fhp, long long fh_size);
-void __sanitizer_syscall_pre_impl___fhopen40(long long fhp, long long fh_size,
-                                             long long flags);
-void __sanitizer_syscall_post_impl___fhopen40(long long res, long long fhp,
-                                              long long fh_size,
-                                              long long flags);
-void __sanitizer_syscall_pre_impl_compat_90_fhstatvfs1(long long fhp,
-                                                       long long fh_size,
-                                                       long long buf,
-                                                       long long flags);
-void __sanitizer_syscall_post_impl_compat_90_fhstatvfs1(long long res,
-                                                        long long fhp,
-                                                        long long fh_size,
-                                                        long long buf,
-                                                        long long flags);
-void __sanitizer_syscall_pre_impl_compat_50___fhstat40(long long fhp,
-                                                       long long fh_size,
-                                                       long long sb);
-void __sanitizer_syscall_post_impl_compat_50___fhstat40(long long res,
-                                                        long long fhp,
-                                                        long long fh_size,
-                                                        long long sb);
-void __sanitizer_syscall_pre_impl_aio_cancel(long long fildes,
-                                             long long aiocbp);
-void __sanitizer_syscall_post_impl_aio_cancel(long long res, long long fildes,
-                                              long long aiocbp);
-void __sanitizer_syscall_pre_impl_aio_error(long long aiocbp);
-void __sanitizer_syscall_post_impl_aio_error(long long res, long long aiocbp);
-void __sanitizer_syscall_pre_impl_aio_fsync(long long op, long long aiocbp);
-void __sanitizer_syscall_post_impl_aio_fsync(long long res, long long op,
-                                             long long aiocbp);
-void __sanitizer_syscall_pre_impl_aio_read(long long aiocbp);
-void __sanitizer_syscall_post_impl_aio_read(long long res, long long aiocbp);
-void __sanitizer_syscall_pre_impl_aio_return(long long aiocbp);
-void __sanitizer_syscall_post_impl_aio_return(long long res, long long aiocbp);
-void __sanitizer_syscall_pre_impl_compat_50_aio_suspend(long long list,
-                                                        long long nent,
-                                                        long long timeout);
-void __sanitizer_syscall_post_impl_compat_50_aio_suspend(long long res,
-                                                         long long list,
-                                                         long long nent,
-                                                         long long timeout);
-void __sanitizer_syscall_pre_impl_aio_write(long long aiocbp);
-void __sanitizer_syscall_post_impl_aio_write(long long res, long long aiocbp);
-void __sanitizer_syscall_pre_impl_lio_listio(long long mode, long long list,
-                                             long long nent, long long sig);
-void __sanitizer_syscall_post_impl_lio_listio(long long res, long long mode,
-                                              long long list, long long nent,
-                                              long long sig);
-/* syscall 407 has been skipped */
-/* syscall 408 has been skipped */
-/* syscall 409 has been skipped */
-void __sanitizer_syscall_pre_impl___mount50(long long type, long long path,
-                                            long long flags, long long data,
-                                            long long data_len);
-void __sanitizer_syscall_post_impl___mount50(long long res, long long type,
-                                             long long path, long long flags,
-                                             long long data,
-                                             long long data_len);
-void __sanitizer_syscall_pre_impl_mremap(long long old_address,
-                                         long long old_size,
-                                         long long new_address,
-                                         long long new_size, long long flags);
-void __sanitizer_syscall_post_impl_mremap(long long res, long long old_address,
-                                          long long old_size,
-                                          long long new_address,
-                                          long long new_size, long long flags);
-void __sanitizer_syscall_pre_impl_pset_create(long long psid);
-void __sanitizer_syscall_post_impl_pset_create(long long res, long long psid);
-void __sanitizer_syscall_pre_impl_pset_destroy(long long psid);
-void __sanitizer_syscall_post_impl_pset_destroy(long long res, long long psid);
-void __sanitizer_syscall_pre_impl_pset_assign(long long psid, long long cpuid,
-                                              long long opsid);
-void __sanitizer_syscall_post_impl_pset_assign(long long res, long long psid,
-                                               long long cpuid,
-                                               long long opsid);
-void __sanitizer_syscall_pre_impl__pset_bind(long long idtype,
-                                             long long first_id,
-                                             long long second_id,
-                                             long long psid, long long opsid);
-void __sanitizer_syscall_post_impl__pset_bind(long long res, long long idtype,
-                                              long long first_id,
-                                              long long second_id,
-                                              long long psid, long long opsid);
-void __sanitizer_syscall_pre_impl___posix_fadvise50(long long fd, long long PAD,
-                                                    long long offset,
-                                                    long long len,
-                                                    long long advice);
-void __sanitizer_syscall_post_impl___posix_fadvise50(
-    long long res, long long fd, long long PAD, long long offset, long long len,
-    long long advice);
-void __sanitizer_syscall_pre_impl___select50(long long nd, long long in,
-                                             long long ou, long long ex,
-                                             long long tv);
-void __sanitizer_syscall_post_impl___select50(long long res, long long nd,
-                                              long long in, long long ou,
-                                              long long ex, long long tv);
-void __sanitizer_syscall_pre_impl___gettimeofday50(long long tp, long long tzp);
-void __sanitizer_syscall_post_impl___gettimeofday50(long long res, long long tp,
-                                                    long long tzp);
-void __sanitizer_syscall_pre_impl___settimeofday50(long long tv, long long tzp);
-void __sanitizer_syscall_post_impl___settimeofday50(long long res, long long tv,
-                                                    long long tzp);
-void __sanitizer_syscall_pre_impl___utimes50(long long path, long long tptr);
-void __sanitizer_syscall_post_impl___utimes50(long long res, long long path,
-                                              long long tptr);
-void __sanitizer_syscall_pre_impl___adjtime50(long long delta,
-                                              long long olddelta);
-void __sanitizer_syscall_post_impl___adjtime50(long long res, long long delta,
-                                               long long olddelta);
-void __sanitizer_syscall_pre_impl___lfs_segwait50(long long fsidp,
-                                                  long long tv);
-void __sanitizer_syscall_post_impl___lfs_segwait50(long long res,
-                                                   long long fsidp,
-                                                   long long tv);
-void __sanitizer_syscall_pre_impl___futimes50(long long fd, long long tptr);
-void __sanitizer_syscall_post_impl___futimes50(long long res, long long fd,
-                                               long long tptr);
-void __sanitizer_syscall_pre_impl___lutimes50(long long path, long long tptr);
-void __sanitizer_syscall_post_impl___lutimes50(long long res, long long path,
-                                               long long tptr);
-void __sanitizer_syscall_pre_impl___setitimer50(long long which, long long itv,
-                                                long long oitv);
-void __sanitizer_syscall_post_impl___setitimer50(long long res, long long which,
-                                                 long long itv, long long oitv);
-void __sanitizer_syscall_pre_impl___getitimer50(long long which, long long itv);
-void __sanitizer_syscall_post_impl___getitimer50(long long res, long long which,
-                                                 long long itv);
-void __sanitizer_syscall_pre_impl___clock_gettime50(long long clock_id,
-                                                    long long tp);
-void __sanitizer_syscall_post_impl___clock_gettime50(long long res,
-                                                     long long clock_id,
-                                                     long long tp);
-void __sanitizer_syscall_pre_impl___clock_settime50(long long clock_id,
-                                                    long long tp);
-void __sanitizer_syscall_post_impl___clock_settime50(long long res,
-                                                     long long clock_id,
-                                                     long long tp);
-void __sanitizer_syscall_pre_impl___clock_getres50(long long clock_id,
-                                                   long long tp);
-void __sanitizer_syscall_post_impl___clock_getres50(long long res,
-                                                    long long clock_id,
-                                                    long long tp);
-void __sanitizer_syscall_pre_impl___nanosleep50(long long rqtp, long long rmtp);
-void __sanitizer_syscall_post_impl___nanosleep50(long long res, long long rqtp,
-                                                 long long rmtp);
-void __sanitizer_syscall_pre_impl_____sigtimedwait50(long long set,
-                                                     long long info,
-                                                     long long timeout);
-void __sanitizer_syscall_post_impl_____sigtimedwait50(long long res,
-                                                      long long set,
-                                                      long long info,
-                                                      long long timeout);
-void __sanitizer_syscall_pre_impl___mq_timedsend50(long long mqdes,
-                                                   long long msg_ptr,
-                                                   long long msg_len,
-                                                   long long msg_prio,
-                                                   long long abs_timeout);
-void __sanitizer_syscall_post_impl___mq_timedsend50(
-    long long res, long long mqdes, long long msg_ptr, long long msg_len,
-    long long msg_prio, long long abs_timeout);
-void __sanitizer_syscall_pre_impl___mq_timedreceive50(long long mqdes,
-                                                      long long msg_ptr,
-                                                      long long msg_len,
-                                                      long long msg_prio,
-                                                      long long abs_timeout);
-void __sanitizer_syscall_post_impl___mq_timedreceive50(
-    long long res, long long mqdes, long long msg_ptr, long long msg_len,
-    long long msg_prio, long long abs_timeout);
-void __sanitizer_syscall_pre_impl_compat_60__lwp_park(long long ts,
-                                                      long long unpark,
-                                                      long long hint,
-                                                      long long unparkhint);
-void __sanitizer_syscall_post_impl_compat_60__lwp_park(long long res,
-                                                       long long ts,
-                                                       long long unpark,
-                                                       long long hint,
-                                                       long long unparkhint);
-void __sanitizer_syscall_pre_impl___kevent50(long long fd, long long changelist,
-                                             long long nchanges,
-                                             long long eventlist,
-                                             long long nevents,
-                                             long long timeout);
-void __sanitizer_syscall_post_impl___kevent50(
-    long long res, long long fd, long long changelist, long long nchanges,
-    long long eventlist, long long nevents, long long timeout);
-void __sanitizer_syscall_pre_impl___pselect50(long long nd, long long in,
-                                              long long ou, long long ex,
-                                              long long ts, long long mask);
-void __sanitizer_syscall_post_impl___pselect50(long long res, long long nd,
-                                               long long in, long long ou,
-                                               long long ex, long long ts,
-                                               long long mask);
-void __sanitizer_syscall_pre_impl___pollts50(long long fds, long long nfds,
-                                             long long ts, long long mask);
-void __sanitizer_syscall_post_impl___pollts50(long long res, long long fds,
-                                              long long nfds, long long ts,
-                                              long long mask);
-void __sanitizer_syscall_pre_impl___aio_suspend50(long long list,
-                                                  long long nent,
-                                                  long long timeout);
-void __sanitizer_syscall_post_impl___aio_suspend50(long long res,
-                                                   long long list,
-                                                   long long nent,
-                                                   long long timeout);
-void __sanitizer_syscall_pre_impl___stat50(long long path, long long ub);
-void __sanitizer_syscall_post_impl___stat50(long long res, long long path,
-                                            long long ub);
-void __sanitizer_syscall_pre_impl___fstat50(long long fd, long long sb);
-void __sanitizer_syscall_post_impl___fstat50(long long res, long long fd,
-                                             long long sb);
-void __sanitizer_syscall_pre_impl___lstat50(long long path, long long ub);
-void __sanitizer_syscall_post_impl___lstat50(long long res, long long path,
-                                             long long ub);
-void __sanitizer_syscall_pre_impl_____semctl50(long long semid,
-                                               long long semnum, long long cmd,
-                                               long long arg);
-void __sanitizer_syscall_post_impl_____semctl50(long long res, long long semid,
-                                                long long semnum, long long cmd,
-                                                long long arg);
-void __sanitizer_syscall_pre_impl___shmctl50(long long shmid, long long cmd,
-                                             long long buf);
-void __sanitizer_syscall_post_impl___shmctl50(long long res, long long shmid,
-                                              long long cmd, long long buf);
-void __sanitizer_syscall_pre_impl___msgctl50(long long msqid, long long cmd,
-                                             long long buf);
-void __sanitizer_syscall_post_impl___msgctl50(long long res, long long msqid,
-                                              long long cmd, long long buf);
-void __sanitizer_syscall_pre_impl___getrusage50(long long who,
-                                                long long rusage);
-void __sanitizer_syscall_post_impl___getrusage50(long long res, long long who,
-                                                 long long rusage);
-void __sanitizer_syscall_pre_impl___timer_settime50(long long timerid,
-                                                    long long flags,
-                                                    long long value,
-                                                    long long ovalue);
-void __sanitizer_syscall_post_impl___timer_settime50(long long res,
-                                                     long long timerid,
-                                                     long long flags,
-                                                     long long value,
-                                                     long long ovalue);
-void __sanitizer_syscall_pre_impl___timer_gettime50(long long timerid,
-                                                    long long value);
-void __sanitizer_syscall_post_impl___timer_gettime50(long long res,
-                                                     long long timerid,
-                                                     long long value);
-#if defined(NTP) || !defined(_KERNEL_OPT)
-void __sanitizer_syscall_pre_impl___ntp_gettime50(long long ntvp);
-void __sanitizer_syscall_post_impl___ntp_gettime50(long long res,
-                                                   long long ntvp);
-#else
-/* syscall 448 has been skipped */
-#endif
-void __sanitizer_syscall_pre_impl___wait450(long long pid, long long status,
-                                            long long options,
-                                            long long rusage);
-void __sanitizer_syscall_post_impl___wait450(long long res, long long pid,
-                                             long long status,
-                                             long long options,
-                                             long long rusage);
-void __sanitizer_syscall_pre_impl___mknod50(long long path, long long mode,
-                                            long long dev);
-void __sanitizer_syscall_post_impl___mknod50(long long res, long long path,
-                                             long long mode, long long dev);
-void __sanitizer_syscall_pre_impl___fhstat50(long long fhp, long long fh_size,
-                                             long long sb);
-void __sanitizer_syscall_post_impl___fhstat50(long long res, long long fhp,
-                                              long long fh_size, long long sb);
-/* syscall 452 has been skipped */
-void __sanitizer_syscall_pre_impl_pipe2(long long fildes, long long flags);
-void __sanitizer_syscall_post_impl_pipe2(long long res, long long fildes,
-                                         long long flags);
-void __sanitizer_syscall_pre_impl_dup3(long long from, long long to,
-                                       long long flags);
-void __sanitizer_syscall_post_impl_dup3(long long res, long long from,
-                                        long long to, long long flags);
-void __sanitizer_syscall_pre_impl_kqueue1(long long flags);
-void __sanitizer_syscall_post_impl_kqueue1(long long res, long long flags);
-void __sanitizer_syscall_pre_impl_paccept(long long s, long long name,
-                                          long long anamelen, long long mask,
-                                          long long flags);
-void __sanitizer_syscall_post_impl_paccept(long long res, long long s,
-                                           long long name, long long anamelen,
-                                           long long mask, long long flags);
-void __sanitizer_syscall_pre_impl_linkat(long long fd1, long long name1,
-                                         long long fd2, long long name2,
-                                         long long flags);
-void __sanitizer_syscall_post_impl_linkat(long long res, long long fd1,
-                                          long long name1, long long fd2,
-                                          long long name2, long long flags);
-void __sanitizer_syscall_pre_impl_renameat(long long fromfd, long long from,
-                                           long long tofd, long long to);
-void __sanitizer_syscall_post_impl_renameat(long long res, long long fromfd,
-                                            long long from, long long tofd,
-                                            long long to);
-void __sanitizer_syscall_pre_impl_mkfifoat(long long fd, long long path,
-                                           long long mode);
-void __sanitizer_syscall_post_impl_mkfifoat(long long res, long long fd,
-                                            long long path, long long mode);
-void __sanitizer_syscall_pre_impl_mknodat(long long fd, long long path,
-                                          long long mode, long long PAD,
-                                          long long dev);
-void __sanitizer_syscall_post_impl_mknodat(long long res, long long fd,
-                                           long long path, long long mode,
-                                           long long PAD, long long dev);
-void __sanitizer_syscall_pre_impl_mkdirat(long long fd, long long path,
-                                          long long mode);
-void __sanitizer_syscall_post_impl_mkdirat(long long res, long long fd,
-                                           long long path, long long mode);
-void __sanitizer_syscall_pre_impl_faccessat(long long fd, long long path,
-                                            long long amode, long long flag);
-void __sanitizer_syscall_post_impl_faccessat(long long res, long long fd,
-                                             long long path, long long amode,
-                                             long long flag);
-void __sanitizer_syscall_pre_impl_fchmodat(long long fd, long long path,
-                                           long long mode, long long flag);
-void __sanitizer_syscall_post_impl_fchmodat(long long res, long long fd,
-                                            long long path, long long mode,
-                                            long long flag);
-void __sanitizer_syscall_pre_impl_fchownat(long long fd, long long path,
-                                           long long owner, long long group,
-                                           long long flag);
-void __sanitizer_syscall_post_impl_fchownat(long long res, long long fd,
-                                            long long path, long long owner,
-                                            long long group, long long flag);
-void __sanitizer_syscall_pre_impl_fexecve(long long fd, long long argp,
-                                          long long envp);
-void __sanitizer_syscall_post_impl_fexecve(long long res, long long fd,
-                                           long long argp, long long envp);
-void __sanitizer_syscall_pre_impl_fstatat(long long fd, long long path,
-                                          long long buf, long long flag);
-void __sanitizer_syscall_post_impl_fstatat(long long res, long long fd,
-                                           long long path, long long buf,
-                                           long long flag);
-void __sanitizer_syscall_pre_impl_utimensat(long long fd, long long path,
-                                            long long tptr, long long flag);
-void __sanitizer_syscall_post_impl_utimensat(long long res, long long fd,
-                                             long long path, long long tptr,
-                                             long long flag);
-void __sanitizer_syscall_pre_impl_openat(long long fd, long long path,
-                                         long long oflags, long long mode);
-void __sanitizer_syscall_post_impl_openat(long long res, long long fd,
-                                          long long path, long long oflags,
-                                          long long mode);
-void __sanitizer_syscall_pre_impl_readlinkat(long long fd, long long path,
-                                             long long buf, long long bufsize);
-void __sanitizer_syscall_post_impl_readlinkat(long long res, long long fd,
-                                              long long path, long long buf,
-                                              long long bufsize);
-void __sanitizer_syscall_pre_impl_symlinkat(long long path1, long long fd,
-                                            long long path2);
-void __sanitizer_syscall_post_impl_symlinkat(long long res, long long path1,
-                                             long long fd, long long path2);
-void __sanitizer_syscall_pre_impl_unlinkat(long long fd, long long path,
-                                           long long flag);
-void __sanitizer_syscall_post_impl_unlinkat(long long res, long long fd,
-                                            long long path, long long flag);
-void __sanitizer_syscall_pre_impl_futimens(long long fd, long long tptr);
-void __sanitizer_syscall_post_impl_futimens(long long res, long long fd,
-                                            long long tptr);
-void __sanitizer_syscall_pre_impl___quotactl(long long path, long long args);
-void __sanitizer_syscall_post_impl___quotactl(long long res, long long path,
-                                              long long args);
-void __sanitizer_syscall_pre_impl_posix_spawn(long long pid, long long path,
-                                              long long file_actions,
-                                              long long attrp, long long argv,
-                                              long long envp);
-void __sanitizer_syscall_post_impl_posix_spawn(long long res, long long pid,
-                                               long long path,
-                                               long long file_actions,
-                                               long long attrp, long long argv,
-                                               long long envp);
-void __sanitizer_syscall_pre_impl_recvmmsg(long long s, long long mmsg,
-                                           long long vlen, long long flags,
-                                           long long timeout);
-void __sanitizer_syscall_post_impl_recvmmsg(long long res, long long s,
-                                            long long mmsg, long long vlen,
-                                            long long flags, long long timeout);
-void __sanitizer_syscall_pre_impl_sendmmsg(long long s, long long mmsg,
-                                           long long vlen, long long flags);
-void __sanitizer_syscall_post_impl_sendmmsg(long long res, long long s,
-                                            long long mmsg, long long vlen,
-                                            long long flags);
-void __sanitizer_syscall_pre_impl_clock_nanosleep(long long clock_id,
-                                                  long long flags,
-                                                  long long rqtp,
-                                                  long long rmtp);
-void __sanitizer_syscall_post_impl_clock_nanosleep(long long res,
-                                                   long long clock_id,
-                                                   long long flags,
-                                                   long long rqtp,
-                                                   long long rmtp);
-void __sanitizer_syscall_pre_impl____lwp_park60(long long clock_id,
-                                                long long flags, long long ts,
-                                                long long unpark,
-                                                long long hint,
-                                                long long unparkhint);
-void __sanitizer_syscall_post_impl____lwp_park60(
-    long long res, long long clock_id, long long flags, long long ts,
-    long long unpark, long long hint, long long unparkhint);
-void __sanitizer_syscall_pre_impl_posix_fallocate(long long fd, long long PAD,
-                                                  long long pos, long long len);
-void __sanitizer_syscall_post_impl_posix_fallocate(long long res, long long fd,
-                                                   long long PAD, long long pos,
-                                                   long long len);
-void __sanitizer_syscall_pre_impl_fdiscard(long long fd, long long PAD,
-                                           long long pos, long long len);
-void __sanitizer_syscall_post_impl_fdiscard(long long res, long long fd,
-                                            long long PAD, long long pos,
-                                            long long len);
-void __sanitizer_syscall_pre_impl_wait6(long long idtype, long long id,
-                                        long long status, long long options,
-                                        long long wru, long long info);
-void __sanitizer_syscall_post_impl_wait6(long long res, long long idtype,
-                                         long long id, long long status,
-                                         long long options, long long wru,
-                                         long long info);
-void __sanitizer_syscall_pre_impl_clock_getcpuclockid2(long long idtype,
-                                                       long long id,
-                                                       long long clock_id);
-void __sanitizer_syscall_post_impl_clock_getcpuclockid2(long long res,
-                                                        long long idtype,
-                                                        long long id,
-                                                        long long clock_id);
-void __sanitizer_syscall_pre_impl___getvfsstat90(long long buf,
-                                                 long long bufsize,
-                                                 long long flags);
-void __sanitizer_syscall_post_impl___getvfsstat90(long long res, long long buf,
-                                                  long long bufsize,
-                                                  long long flags);
-void __sanitizer_syscall_pre_impl___statvfs190(long long path, long long buf,
-                                               long long flags);
-void __sanitizer_syscall_post_impl___statvfs190(long long res, long long path,
-                                                long long buf, long long flags);
-void __sanitizer_syscall_pre_impl___fstatvfs190(long long fd, long long buf,
-                                                long long flags);
-void __sanitizer_syscall_post_impl___fstatvfs190(long long res, long long fd,
-                                                 long long buf,
-                                                 long long flags);
-void __sanitizer_syscall_pre_impl___fhstatvfs190(long long fhp,
-                                                 long long fh_size,
-                                                 long long buf,
-                                                 long long flags);
-void __sanitizer_syscall_post_impl___fhstatvfs190(long long res, long long fhp,
-                                                  long long fh_size,
-                                                  long long buf,
-                                                  long long flags);
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
-
-#endif // SANITIZER_NETBSD_SYSCALL_HOOKS_H
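The declarations removed above follow one fixed convention: every NetBSD syscall gets a pre/post hook pair, and the post variant repeats the pre variant's arguments with the syscall result prepended as res. A minimal standalone C sketch of how an interceptor might drive such a pair, using pipe2 as the example; pre_pipe2, post_pipe2, and do_raw_pipe2 are hypothetical stand-ins for __sanitizer_syscall_pre_impl_pipe2, __sanitizer_syscall_post_impl_pipe2, and the raw syscall, not part of this header or this change:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stubs so the sketch compiles on its own; real interceptors
 * call the sanitizer runtime's __sanitizer_syscall_*_impl_pipe2 hooks. */
static void pre_pipe2(long long fildes, long long flags) {
  printf("pre  pipe2(fildes=%lld, flags=%lld)\n", fildes, flags);
}
static void post_pipe2(long long res, long long fildes, long long flags) {
  printf("post pipe2(fildes=%lld, flags=%lld) -> %lld\n", fildes, flags, res);
}

/* Placeholder for the raw syscall; returns 0 to simulate success. */
static long long do_raw_pipe2(long long fildes, long long flags) {
  (void)fildes; (void)flags;
  return 0;
}

/* The interception shape: pre hook, raw syscall, post hook with res first. */
static long long intercepted_pipe2(long long fildes, long long flags) {
  pre_pipe2(fildes, flags);
  long long res = do_raw_pipe2(fildes, flags);
  post_pipe2(res, fildes, flags);
  return res;
}

int main(void) {
  int fds[2];
  return (int)intercepted_pipe2((long long)(uintptr_t)fds, 0);
}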
diff --git a/linux-x86/lib64/clang/11.0.5/include/wasm_simd128.h b/linux-x86/lib64/clang/11.0.5/include/wasm_simd128.h
deleted file mode 100644
index b781238..0000000
--- a/linux-x86/lib64/clang/11.0.5/include/wasm_simd128.h
+++ /dev/null
@@ -1,1133 +0,0 @@
-/*===---- wasm_simd128.h - WebAssembly portable SIMD intrinsics ------------===
- *
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
- *
- *===-----------------------------------------------------------------------===
- */
-
-#ifndef __WASM_SIMD128_H
-#define __WASM_SIMD128_H
-
-#include <stdbool.h>
-#include <stdint.h>
-
-// User-facing type
-typedef int32_t v128_t __attribute__((__vector_size__(16), __aligned__(16)));
-
-// Internal types determined by clang builtin definitions
-typedef int32_t __v128_u __attribute__((__vector_size__(16), __aligned__(1)));
-typedef char __i8x16 __attribute__((__vector_size__(16), __aligned__(16)));
-typedef signed char __s8x16
-    __attribute__((__vector_size__(16), __aligned__(16)));
-typedef unsigned char __u8x16
-    __attribute__((__vector_size__(16), __aligned__(16)));
-typedef short __i16x8 __attribute__((__vector_size__(16), __aligned__(16)));
-typedef unsigned short __u16x8
-    __attribute__((__vector_size__(16), __aligned__(16)));
-typedef int __i32x4 __attribute__((__vector_size__(16), __aligned__(16)));
-typedef unsigned int __u32x4
-    __attribute__((__vector_size__(16), __aligned__(16)));
-typedef long long __i64x2 __attribute__((__vector_size__(16), __aligned__(16)));
-typedef unsigned long long __u64x2
-    __attribute__((__vector_size__(16), __aligned__(16)));
-typedef float __f32x4 __attribute__((__vector_size__(16), __aligned__(16)));
-typedef double __f64x2 __attribute__((__vector_size__(16), __aligned__(16)));
-
-#define __DEFAULT_FN_ATTRS                                                     \
-  __attribute__((__always_inline__, __nodebug__, __target__("simd128"),        \
-                 __min_vector_width__(128)))
-
-#define __REQUIRE_CONSTANT(e)                                                  \
-  _Static_assert(__builtin_constant_p(e), "Expected constant")
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load(const void *__mem) {
-  // UB-free unaligned access copied from xmmintrin.h
-  struct __wasm_v128_load_struct {
-    __v128_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  return ((const struct __wasm_v128_load_struct *)__mem)->__v;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v8x16_load_splat(const void *__mem) {
-  struct __wasm_v8x16_load_splat_struct {
-    uint8_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  uint8_t __v = ((const struct __wasm_v8x16_load_splat_struct *)__mem)->__v;
-  return (v128_t)(__u8x16){__v, __v, __v, __v, __v, __v, __v, __v,
-                           __v, __v, __v, __v, __v, __v, __v, __v};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v16x8_load_splat(const void *__mem) {
-  struct __wasm_v16x8_load_splat_struct {
-    uint16_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  uint16_t __v = ((const struct __wasm_v16x8_load_splat_struct *)__mem)->__v;
-  return (v128_t)(__u16x8){__v, __v, __v, __v, __v, __v, __v, __v};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v32x4_load_splat(const void *__mem) {
-  struct __wasm_v32x4_load_splat_struct {
-    uint32_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  uint32_t __v = ((const struct __wasm_v32x4_load_splat_struct *)__mem)->__v;
-  return (v128_t)(__u32x4){__v, __v, __v, __v};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v64x2_load_splat(const void *__mem) {
-  struct __wasm_v64x2_load_splat_struct {
-    uint64_t __v;
-  } __attribute__((__packed__, __may_alias__));
-  uint64_t __v = ((const struct __wasm_v64x2_load_splat_struct *)__mem)->__v;
-  return (v128_t)(__u64x2){__v, __v};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_load_8x8(const void *__mem) {
-  typedef int8_t __i8x8 __attribute__((__vector_size__(8), __aligned__(8)));
-  struct __wasm_i16x8_load_8x8_struct {
-    __i8x8 __v;
-  } __attribute__((__packed__, __may_alias__));
-  __i8x8 __v = ((const struct __wasm_i16x8_load_8x8_struct *)__mem)->__v;
-  return (v128_t) __builtin_convertvector(__v, __i16x8);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_load_8x8(const void *__mem) {
-  typedef uint8_t __u8x8 __attribute__((__vector_size__(8), __aligned__(8)));
-  struct __wasm_u16x8_load_8x8_struct {
-    __u8x8 __v;
-  } __attribute__((__packed__, __may_alias__));
-  __u8x8 __v = ((const struct __wasm_u16x8_load_8x8_struct *)__mem)->__v;
-  return (v128_t) __builtin_convertvector(__v, __u16x8);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_load_16x4(const void *__mem) {
-  typedef int16_t __i16x4 __attribute__((__vector_size__(8), __aligned__(8)));
-  struct __wasm_i32x4_load_16x4_struct {
-    __i16x4 __v;
-  } __attribute__((__packed__, __may_alias__));
-  __i16x4 __v = ((const struct __wasm_i32x4_load_16x4_struct *)__mem)->__v;
-  return (v128_t) __builtin_convertvector(__v, __i32x4);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u32x4_load_16x4(const void *__mem) {
-  typedef uint16_t __u16x4 __attribute__((__vector_size__(8), __aligned__(8)));
-  struct __wasm_u32x4_load_16x4_struct {
-    __u16x4 __v;
-  } __attribute__((__packed__, __may_alias__));
-  __u16x4 __v = ((const struct __wasm_u32x4_load_16x4_struct *)__mem)->__v;
-  return (v128_t) __builtin_convertvector(__v, __u32x4);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i64x2_load_32x2(const void *__mem) {
-  typedef int32_t __i32x2 __attribute__((__vector_size__(8), __aligned__(8)));
-  struct __wasm_i64x2_load_32x2_struct {
-    __i32x2 __v;
-  } __attribute__((__packed__, __may_alias__));
-  __i32x2 __v = ((const struct __wasm_i64x2_load_32x2_struct *)__mem)->__v;
-  return (v128_t) __builtin_convertvector(__v, __i64x2);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u64x2_load_32x2(const void *__mem) {
-  typedef uint32_t __u32x2 __attribute__((__vector_size__(8), __aligned__(8)));
-  struct __wasm_u64x2_load_32x2_struct {
-    __u32x2 __v;
-  } __attribute__((__packed__, __may_alias__));
-  __u32x2 __v = ((const struct __wasm_u64x2_load_32x2_struct *)__mem)->__v;
-  return (v128_t) __builtin_convertvector(__v, __u64x2);
-}
-
-static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store(void *__mem,
-                                                          v128_t __a) {
-  // UB-free unaligned access copied from xmmintrin.h
-  struct __wasm_v128_store_struct {
-    __v128_u __v;
-  } __attribute__((__packed__, __may_alias__));
-  ((struct __wasm_v128_store_struct *)__mem)->__v = __a;
-}
-
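The two helpers above (and the load_splat/load-extend family between them) wrap memory access in a packed, may_alias struct so that unaligned pointers are read and written without undefined behavior. A small usage sketch, assuming a wasm32 target built with -msimd128; add4 is an illustrative name, and wasm_i32x4_add is defined further down in this header:

#include <stdint.h>
#include <wasm_simd128.h>

/* Elementwise add of four int32 lanes; the load and store helpers are safe
 * even when src_a, src_b, and dst are not 16-byte aligned. */
void add4(const int32_t *src_a, const int32_t *src_b, int32_t *dst) {
  v128_t a = wasm_v128_load(src_a);
  v128_t b = wasm_v128_load(src_b);
  wasm_v128_store(dst, wasm_i32x4_add(a, b));
}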
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i8x16_make(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3, int8_t __c4,
-                int8_t __c5, int8_t __c6, int8_t __c7, int8_t __c8, int8_t __c9,
-                int8_t __c10, int8_t __c11, int8_t __c12, int8_t __c13,
-                int8_t __c14, int8_t __c15) {
-  return (v128_t)(__i8x16){__c0,  __c1,  __c2,  __c3, __c4,  __c5,
-                           __c6,  __c7,  __c8,  __c9, __c10, __c11,
-                           __c12, __c13, __c14, __c15};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_make(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3,
-                int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7) {
-  return (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_make(int32_t __c0,
-                                                            int32_t __c1,
-                                                            int32_t __c2,
-                                                            int32_t __c3) {
-  return (v128_t)(__i32x4){__c0, __c1, __c2, __c3};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_make(float __c0,
-                                                            float __c1,
-                                                            float __c2,
-                                                            float __c3) {
-  return (v128_t)(__f32x4){__c0, __c1, __c2, __c3};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_make(int64_t __c0,
-                                                            int64_t __c1) {
-  return (v128_t)(__i64x2){__c0, __c1};
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_make(double __c0,
-                                                            double __c1) {
-  return (v128_t)(__f64x2){__c0, __c1};
-}
-
-#define wasm_i8x16_const(__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7, __c8, \
-                         __c9, __c10, __c11, __c12, __c13, __c14, __c15)       \
-  __extension__({                                                              \
-    __REQUIRE_CONSTANT(__c0);                                                  \
-    __REQUIRE_CONSTANT(__c1);                                                  \
-    __REQUIRE_CONSTANT(__c2);                                                  \
-    __REQUIRE_CONSTANT(__c3);                                                  \
-    __REQUIRE_CONSTANT(__c4);                                                  \
-    __REQUIRE_CONSTANT(__c5);                                                  \
-    __REQUIRE_CONSTANT(__c6);                                                  \
-    __REQUIRE_CONSTANT(__c7);                                                  \
-    __REQUIRE_CONSTANT(__c8);                                                  \
-    __REQUIRE_CONSTANT(__c9);                                                  \
-    __REQUIRE_CONSTANT(__c10);                                                 \
-    __REQUIRE_CONSTANT(__c11);                                                 \
-    __REQUIRE_CONSTANT(__c12);                                                 \
-    __REQUIRE_CONSTANT(__c13);                                                 \
-    __REQUIRE_CONSTANT(__c14);                                                 \
-    __REQUIRE_CONSTANT(__c15);                                                 \
-    (v128_t)(__i8x16){__c0, __c1, __c2,  __c3,  __c4,  __c5,  __c6,  __c7,     \
-                      __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15};   \
-  })
-
-#define wasm_i16x8_const(__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7)       \
-  __extension__({                                                              \
-    __REQUIRE_CONSTANT(__c0);                                                  \
-    __REQUIRE_CONSTANT(__c1);                                                  \
-    __REQUIRE_CONSTANT(__c2);                                                  \
-    __REQUIRE_CONSTANT(__c3);                                                  \
-    __REQUIRE_CONSTANT(__c4);                                                  \
-    __REQUIRE_CONSTANT(__c5);                                                  \
-    __REQUIRE_CONSTANT(__c6);                                                  \
-    __REQUIRE_CONSTANT(__c7);                                                  \
-    (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};         \
-  })
-
-#define wasm_i32x4_const(__c0, __c1, __c2, __c3)                               \
-  __extension__({                                                              \
-    __REQUIRE_CONSTANT(__c0);                                                  \
-    __REQUIRE_CONSTANT(__c1);                                                  \
-    __REQUIRE_CONSTANT(__c2);                                                  \
-    __REQUIRE_CONSTANT(__c3);                                                  \
-    (v128_t)(__i32x4){__c0, __c1, __c2, __c3};                                 \
-  })
-
-#define wasm_f32x4_const(__c0, __c1, __c2, __c3)                               \
-  __extension__({                                                              \
-    __REQUIRE_CONSTANT(__c0);                                                  \
-    __REQUIRE_CONSTANT(__c1);                                                  \
-    __REQUIRE_CONSTANT(__c2);                                                  \
-    __REQUIRE_CONSTANT(__c3);                                                  \
-    (v128_t)(__f32x4){__c0, __c1, __c2, __c3};                                 \
-  })
-
-#define wasm_i64x2_const(__c0, __c1)                                           \
-  __extension__({                                                              \
-    __REQUIRE_CONSTANT(__c0);                                                  \
-    __REQUIRE_CONSTANT(__c1);                                                  \
-    (v128_t)(__i64x2){__c0, __c1};                                             \
-  })
-
-#define wasm_f64x2_const(__c0, __c1)                                           \
-  __extension__({                                                              \
-    __REQUIRE_CONSTANT(__c0);                                                  \
-    __REQUIRE_CONSTANT(__c1);                                                  \
-    (v128_t)(__f64x2){__c0, __c1};                                             \
-  })
-
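The _const macros above differ from the _make functions only in statically asserting, via __REQUIRE_CONSTANT, that every operand is a constant expression, so the whole vector can typically lower to a single v128.const. A brief sketch of the distinction, assuming -msimd128; both function names are illustrative:

#include <wasm_simd128.h>

/* Fine: all four operands are integer constant expressions. */
v128_t all_ones_lanes(void) { return wasm_i32x4_const(1, 1, 1, 1); }

/* By contrast, wasm_i32x4_make accepts runtime values. */
v128_t from_scalar(int32_t x) { return wasm_i32x4_make(x, 0, 0, 0); }

/* wasm_i32x4_const(x, 0, 0, 0) with runtime x would trip the
 * _Static_assert inside __REQUIRE_CONSTANT and fail to compile. */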
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_splat(int8_t __a) {
-  return (v128_t)(__i8x16){__a, __a, __a, __a, __a, __a, __a, __a,
-                           __a, __a, __a, __a, __a, __a, __a, __a};
-}
-
-#define wasm_i8x16_extract_lane(__a, __i)                                      \
-  (__builtin_wasm_extract_lane_s_i8x16((__i8x16)(__a), __i))
-
-#define wasm_u8x16_extract_lane(__a, __i)                                      \
-  (__builtin_wasm_extract_lane_u_i8x16((__i8x16)(__a), __i))
-
-#define wasm_i8x16_replace_lane(__a, __i, __b)                                 \
-  ((v128_t)__builtin_wasm_replace_lane_i8x16((__i8x16)(__a), __i, __b))
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_splat(int16_t __a) {
-  return (v128_t)(__i16x8){__a, __a, __a, __a, __a, __a, __a, __a};
-}
-
-#define wasm_i16x8_extract_lane(__a, __i)                                      \
-  (__builtin_wasm_extract_lane_s_i16x8((__i16x8)(__a), __i))
-
-#define wasm_u16x8_extract_lane(__a, __i)                                      \
-  (__builtin_wasm_extract_lane_u_i16x8((__i16x8)(__a), __i))
-
-#define wasm_i16x8_replace_lane(__a, __i, __b)                                 \
-  ((v128_t)__builtin_wasm_replace_lane_i16x8((__i16x8)(__a), __i, __b))
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_splat(int32_t __a) {
-  return (v128_t)(__i32x4){__a, __a, __a, __a};
-}
-
-#define wasm_i32x4_extract_lane(__a, __i)                                      \
-  (__builtin_wasm_extract_lane_i32x4((__i32x4)(__a), __i))
-
-#define wasm_i32x4_replace_lane(__a, __i, __b)                                 \
-  ((v128_t)__builtin_wasm_replace_lane_i32x4((__i32x4)(__a), __i, __b))
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_splat(int64_t __a) {
-  return (v128_t)(__i64x2){__a, __a};
-}
-
-#define wasm_i64x2_extract_lane(__a, __i)                                      \
-  (__builtin_wasm_extract_lane_i64x2((__i64x2)(__a), __i))
-
-#define wasm_i64x2_replace_lane(__a, __i, __b)                                 \
-  ((v128_t)__builtin_wasm_replace_lane_i64x2((__i64x2)(__a), __i, __b))
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_splat(float __a) {
-  return (v128_t)(__f32x4){__a, __a, __a, __a};
-}
-
-#define wasm_f32x4_extract_lane(__a, __i)                                      \
-  (__builtin_wasm_extract_lane_f32x4((__f32x4)(__a), __i))
-
-#define wasm_f32x4_replace_lane(__a, __i, __b)                                 \
-  ((v128_t)__builtin_wasm_replace_lane_f32x4((__f32x4)(__a), __i, __b))
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_splat(double __a) {
-  return (v128_t)(__f64x2){__a, __a};
-}
-
-#define wasm_f64x2_extract_lane(__a, __i)                                      \
-  (__builtin_wasm_extract_lane_f64x2((__f64x2)(__a), __i))
-
-#define wasm_f64x2_replace_lane(__a, __i, __b)                                 \
-  ((v128_t)__builtin_wasm_replace_lane_f64x2((__f64x2)(__a), __i, __b))
-
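Splat broadcasts one scalar to every lane, while the extract/replace forms are macros because the underlying builtins require the lane index to be a compile-time constant. A short sketch, assuming -msimd128; both function names are illustrative:

#include <stdint.h>
#include <wasm_simd128.h>

/* Lane indices must be literal constants, as the builtins behind the
 * extract/replace macros require. */
int32_t sum_outer_lanes(v128_t v) {
  return wasm_i32x4_extract_lane(v, 0) + wasm_i32x4_extract_lane(v, 3);
}

v128_t zero_lane1(v128_t v) {
  return wasm_i32x4_replace_lane(v, 1, 0); /* overwrite lane 1 with 0 */
}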
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_eq(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__s8x16)__a == (__s8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ne(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__s8x16)__a != (__s8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__s8x16)__a < (__s8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u8x16)__a < (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__s8x16)__a > (__s8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u8x16)__a > (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__s8x16)__a <= (__s8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u8x16)__a <= (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__s8x16)__a >= (__s8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u8x16)__a >= (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_eq(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i16x8)__a == (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ne(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u16x8)__a != (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i16x8)__a < (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u16x8)__a < (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i16x8)__a > (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u16x8)__a > (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i16x8)__a <= (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u16x8)__a <= (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i16x8)__a >= (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u16x8)__a >= (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_eq(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i32x4)__a == (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ne(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i32x4)__a != (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i32x4)__a < (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u32x4)__a < (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i32x4)__a > (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u32x4)__a > (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i32x4)__a <= (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u32x4)__a <= (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__i32x4)__a >= (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__u32x4)__a >= (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_eq(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f32x4)__a == (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ne(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f32x4)__a != (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f32x4)__a < (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f32x4)__a > (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f32x4)__a <= (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f32x4)__a >= (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_eq(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f64x2)__a == (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ne(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f64x2)__a != (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_lt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f64x2)__a < (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_gt(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f64x2)__a > (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_le(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f64x2)__a <= (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ge(v128_t __a,
-                                                          v128_t __b) {
-  return (v128_t)((__f64x2)__a >= (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_not(v128_t __a) {
-  return ~__a;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_and(v128_t __a,
-                                                          v128_t __b) {
-  return __a & __b;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_or(v128_t __a,
-                                                         v128_t __b) {
-  return __a | __b;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_xor(v128_t __a,
-                                                          v128_t __b) {
-  return __a ^ __b;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_andnot(v128_t __a,
-                                                             v128_t __b) {
-  return __a & ~__b;
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_bitselect(v128_t __a,
-                                                                v128_t __b,
-                                                                v128_t __mask) {
-  return (v128_t)__builtin_wasm_bitselect((__i32x4)__a, (__i32x4)__b,
-                                          (__i32x4)__mask);
-}
-
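wasm_v128_bitselect takes bits from __a where __mask is 1 and from __b where it is 0, and the comparison intrinsics above produce all-ones lanes on true, so the two compose into a branch-free lane-wise select. A sketch, assuming -msimd128; select_greater_i32x4 is an illustrative name:

#include <wasm_simd128.h>

/* Lane-wise max built from a comparison mask: where a > b the mask lane is
 * all ones, so bitselect takes that lane from a, otherwise from b. */
v128_t select_greater_i32x4(v128_t a, v128_t b) {
  return wasm_v128_bitselect(a, b, wasm_i32x4_gt(a, b));
}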
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_abs(v128_t __a) {
-  return (v128_t)__builtin_wasm_abs_i8x16((__i8x16)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_neg(v128_t __a) {
-  return (v128_t)(-(__u8x16)__a);
-}
-
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_any_true(v128_t __a) {
-  return __builtin_wasm_any_true_i8x16((__i8x16)__a);
-}
-
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_all_true(v128_t __a) {
-  return __builtin_wasm_all_true_i8x16((__i8x16)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a,
-                                                           int32_t __b) {
-  return (v128_t)((__i8x16)__a << __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a,
-                                                           int32_t __b) {
-  return (v128_t)((__s8x16)__a >> __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a,
-                                                           int32_t __b) {
-  return (v128_t)((__u8x16)__a >> __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u8x16)__a + (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i8x16_add_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_add_saturate_s_i8x16((__i8x16)__a,
-                                                     (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u8x16_add_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_add_saturate_u_i8x16((__i8x16)__a,
-                                                     (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u8x16)__a - (__u8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i8x16_sub_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_sub_saturate_s_i8x16((__i8x16)__a,
-                                                     (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_sub_saturate_u_i8x16((__i8x16)__a,
-                                                     (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_s_i8x16((__i8x16)__a, (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_u_i8x16((__i8x16)__a, (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_s_i8x16((__i8x16)__a, (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_u_i8x16((__i8x16)__a, (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_avgr(v128_t __a,
-                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_avgr_u_i8x16((__i8x16)__a, (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_abs(v128_t __a) {
-  return (v128_t)__builtin_wasm_abs_i16x8((__i16x8)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_neg(v128_t __a) {
-  return (v128_t)(-(__u16x8)__a);
-}
-
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_any_true(v128_t __a) {
-  return __builtin_wasm_any_true_i16x8((__i16x8)__a);
-}
-
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_all_true(v128_t __a) {
-  return __builtin_wasm_all_true_i16x8((__i16x8)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shl(v128_t __a,
-                                                           int32_t __b) {
-  return (v128_t)((__i16x8)__a << __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shr(v128_t __a,
-                                                           int32_t __b) {
-  return (v128_t)((__i16x8)__a >> __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_shr(v128_t __a,
-                                                           int32_t __b) {
-  return (v128_t)((__u16x8)__a >> __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u16x8)__a + (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_add_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_add_saturate_s_i16x8((__i16x8)__a,
-                                                     (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_add_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_add_saturate_u_i16x8((__i16x8)__a,
-                                                     (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__i16x8)__a - (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_sub_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_sub_saturate_s_i16x8((__i16x8)__a,
-                                                     (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_sub_saturate_u_i16x8((__i16x8)__a,
-                                                     (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_mul(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u16x8)__a * (__u16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_s_i16x8((__i16x8)__a, (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_u_i16x8((__i16x8)__a, (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_s_i16x8((__i16x8)__a, (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_u_i16x8((__i16x8)__a, (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_avgr(v128_t __a,
-                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_avgr_u_i16x8((__i16x8)__a, (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_abs(v128_t __a) {
-  return (v128_t)__builtin_wasm_abs_i32x4((__i32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_neg(v128_t __a) {
-  return (v128_t)(-(__u32x4)__a);
-}
-
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_any_true(v128_t __a) {
-  return __builtin_wasm_any_true_i32x4((__i32x4)__a);
-}
-
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_all_true(v128_t __a) {
-  return __builtin_wasm_all_true_i32x4((__i32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shl(v128_t __a,
-                                                           int32_t __b) {
-  return (v128_t)((__i32x4)__a << __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shr(v128_t __a,
-                                                           int32_t __b) {
-  return (v128_t)((__i32x4)__a >> __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_shr(v128_t __a,
-                                                           int32_t __b) {
-  return (v128_t)((__u32x4)__a >> __b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_add(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u32x4)__a + (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_sub(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u32x4)__a - (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_mul(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u32x4)__a * (__u32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_s_i32x4((__i32x4)__a, (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_u_i32x4((__i32x4)__a, (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_s_i32x4((__i32x4)__a, (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_u_i32x4((__i32x4)__a, (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_neg(v128_t __a) {
-  return (v128_t)(-(__u64x2)__a);
-}
-
-#ifdef __wasm_unimplemented_simd128__
-
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_any_true(v128_t __a) {
-  return __builtin_wasm_any_true_i64x2((__i64x2)__a);
-}
-
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_all_true(v128_t __a) {
-  return __builtin_wasm_all_true_i64x2((__i64x2)__a);
-}
-
-#endif // __wasm_unimplemented_simd128__
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shl(v128_t __a,
-                                                           int32_t __b) {
-  return (v128_t)((__i64x2)__a << (int64_t)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shr(v128_t __a,
-                                                           int32_t __b) {
-  return (v128_t)((__i64x2)__a >> (int64_t)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u64x2_shr(v128_t __a,
-                                                           int32_t __b) {
-  return (v128_t)((__u64x2)__a >> (int64_t)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_add(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u64x2)__a + (__u64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_sub(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u64x2)__a - (__u64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_mul(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__u64x2)__a * (__u64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_abs(v128_t __a) {
-  return (v128_t)__builtin_wasm_abs_f32x4((__f32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_neg(v128_t __a) {
-  return (v128_t)(-(__f32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sqrt(v128_t __a) {
-  return (v128_t)__builtin_wasm_sqrt_f32x4((__f32x4)__a);
-}
-
-#ifdef __wasm_unimplemented_simd128__
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_qfma(v128_t __a,
-                                                            v128_t __b,
-                                                            v128_t __c) {
-  return (v128_t)__builtin_wasm_qfma_f32x4((__f32x4)__a, (__f32x4)__b,
-                                           (__f32x4)__c);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_qfms(v128_t __a,
-                                                            v128_t __b,
-                                                            v128_t __c) {
-  return (v128_t)__builtin_wasm_qfms_f32x4((__f32x4)__a, (__f32x4)__b,
-                                           (__f32x4)__c);
-}
-
-#endif // __wasm_unimplemented_simd128__
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_add(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f32x4)__a + (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sub(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f32x4)__a - (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_mul(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f32x4)__a * (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_div(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f32x4)__a / (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_f32x4((__f32x4)__a, (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_f32x4((__f32x4)__a, (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmin(v128_t __a,
-                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_pmin_f32x4((__f32x4)__a, (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmax(v128_t __a,
-                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_pmax_f32x4((__f32x4)__a, (__f32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_abs(v128_t __a) {
-  return (v128_t)__builtin_wasm_abs_f64x2((__f64x2)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_neg(v128_t __a) {
-  return (v128_t)(-(__f64x2)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sqrt(v128_t __a) {
-  return (v128_t)__builtin_wasm_sqrt_f64x2((__f64x2)__a);
-}
-
-#ifdef __wasm_unimplemented_simd128__
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_qfma(v128_t __a,
-                                                            v128_t __b,
-                                                            v128_t __c) {
-  return (v128_t)__builtin_wasm_qfma_f64x2((__f64x2)__a, (__f64x2)__b,
-                                           (__f64x2)__c);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_qfms(v128_t __a,
-                                                            v128_t __b,
-                                                            v128_t __c) {
-  return (v128_t)__builtin_wasm_qfms_f64x2((__f64x2)__a, (__f64x2)__b,
-                                           (__f64x2)__c);
-}
-
-#endif // __wasm_unimplemented_simd128__
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_add(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f64x2)__a + (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sub(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f64x2)__a - (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_mul(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f64x2)__a * (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_div(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)((__f64x2)__a / (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_min(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_min_f64x2((__f64x2)__a, (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_max(v128_t __a,
-                                                           v128_t __b) {
-  return (v128_t)__builtin_wasm_max_f64x2((__f64x2)__a, (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmin(v128_t __a,
-                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_pmin_f64x2((__f64x2)__a, (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmax(v128_t __a,
-                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_pmax_f64x2((__f64x2)__a, (__f64x2)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_trunc_saturate_f32x4(v128_t __a) {
-  return (v128_t)__builtin_wasm_trunc_saturate_s_i32x4_f32x4((__f32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u32x4_trunc_saturate_f32x4(v128_t __a) {
-  return (v128_t)__builtin_wasm_trunc_saturate_u_i32x4_f32x4((__f32x4)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_f32x4_convert_i32x4(v128_t __a) {
-  return (v128_t) __builtin_convertvector((__i32x4)__a, __f32x4);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_f32x4_convert_u32x4(v128_t __a) {
-  return (v128_t) __builtin_convertvector((__u32x4)__a, __f32x4);
-}
-
-#define wasm_v8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
-                           __c7, __c8, __c9, __c10, __c11, __c12, __c13,       \
-                           __c14, __c15)                                       \
-  ((v128_t)__builtin_wasm_shuffle_v8x16(                                       \
-      (__i8x16)(__a), (__i8x16)(__b), __c0, __c1, __c2, __c3, __c4, __c5,      \
-      __c6, __c7, __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15))
-
-#define wasm_v16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
-                           __c7)                                               \
-  ((v128_t)__builtin_wasm_shuffle_v8x16(                                       \
-      (__i8x16)(__a), (__i8x16)(__b), (__c0)*2, (__c0)*2 + 1, (__c1)*2,        \
-      (__c1)*2 + 1, (__c2)*2, (__c2)*2 + 1, (__c3)*2, (__c3)*2 + 1, (__c4)*2,  \
-      (__c4)*2 + 1, (__c5)*2, (__c5)*2 + 1, (__c6)*2, (__c6)*2 + 1, (__c7)*2,  \
-      (__c7)*2 + 1))
-
-#define wasm_v32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3)                   \
-  ((v128_t)__builtin_wasm_shuffle_v8x16(                                       \
-      (__i8x16)(__a), (__i8x16)(__b), (__c0)*4, (__c0)*4 + 1, (__c0)*4 + 2,    \
-      (__c0)*4 + 3, (__c1)*4, (__c1)*4 + 1, (__c1)*4 + 2, (__c1)*4 + 3,        \
-      (__c2)*4, (__c2)*4 + 1, (__c2)*4 + 2, (__c2)*4 + 3, (__c3)*4,            \
-      (__c3)*4 + 1, (__c3)*4 + 2, (__c3)*4 + 3))
-
-#define wasm_v64x2_shuffle(__a, __b, __c0, __c1)                               \
-  ((v128_t)__builtin_wasm_shuffle_v8x16(                                       \
-      (__i8x16)(__a), (__i8x16)(__b), (__c0)*8, (__c0)*8 + 1, (__c0)*8 + 2,    \
-      (__c0)*8 + 3, (__c0)*8 + 4, (__c0)*8 + 5, (__c0)*8 + 6, (__c0)*8 + 7,    \
-      (__c1)*8, (__c1)*8 + 1, (__c1)*8 + 2, (__c1)*8 + 3, (__c1)*8 + 4,        \
-      (__c1)*8 + 5, (__c1)*8 + 6, (__c1)*8 + 7))
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v8x16_swizzle(v128_t __a,
-                                                               v128_t __b) {
-  return (v128_t)__builtin_wasm_swizzle_v8x16((__i8x16)__a, (__i8x16)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i8x16_narrow_i16x8(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_narrow_s_i8x16_i16x8((__i16x8)__a,
-                                                     (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u8x16_narrow_i16x8(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__i16x8)__a,
-                                                     (__i16x8)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_narrow_i32x4(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_narrow_s_i16x8_i32x4((__i32x4)__a,
-                                                     (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_narrow_i32x4(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__i32x4)__a,
-                                                     (__i32x4)__b);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_widen_low_i8x16(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_low_s_i16x8_i8x16((__i8x16)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_widen_high_i8x16(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_high_s_i16x8_i8x16((__i8x16)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_widen_low_u8x16(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_low_u_i16x8_i8x16((__i8x16)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_widen_high_u8x16(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_high_u_i16x8_i8x16((__i8x16)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_widen_low_i16x8(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_low_s_i32x4_i16x8((__i16x8)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_widen_high_i16x8(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_high_s_i32x4_i16x8((__i16x8)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_widen_low_u16x8(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_low_u_i32x4_i16x8((__i16x8)__a);
-}
-
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_widen_high_u16x8(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_high_u_i32x4_i16x8((__i16x8)__a);
-}
-
-// Undefine helper macros
-#undef __DEFAULT_FN_ATTRS
-
-#endif // __WASM_SIMD128_H
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_builtin_vars.h b/linux-x86/lib64/clang/12.0.1/include/__clang_cuda_builtin_vars.h
similarity index 95%
copy from darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_builtin_vars.h
copy to linux-x86/lib64/clang/12.0.1/include/__clang_cuda_builtin_vars.h
index 2ba1521..412e823 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_builtin_vars.h
+++ b/linux-x86/lib64/clang/12.0.1/include/__clang_cuda_builtin_vars.h
@@ -55,7 +55,9 @@
   __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_tid_z());
   // threadIdx should be convertible to uint3 (in fact in nvcc, it *is* a
   // uint3).  This function is defined after we pull in vector_types.h.
+  __attribute__((device)) operator dim3() const;
   __attribute__((device)) operator uint3() const;
+
 private:
   __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_threadIdx_t);
 };
@@ -66,7 +68,9 @@
   __CUDA_DEVICE_BUILTIN(z,__nvvm_read_ptx_sreg_ctaid_z());
   // blockIdx should be convertible to uint3 (in fact in nvcc, it *is* a
   // uint3).  This function is defined after we pull in vector_types.h.
+  __attribute__((device)) operator dim3() const;
   __attribute__((device)) operator uint3() const;
+
 private:
   __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockIdx_t);
 };
@@ -78,6 +82,8 @@
   // blockDim should be convertible to dim3 (in fact in nvcc, it *is* a
   // dim3).  This function is defined after we pull in vector_types.h.
   __attribute__((device)) operator dim3() const;
+  __attribute__((device)) operator uint3() const;
+
 private:
   __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_blockDim_t);
 };
@@ -89,6 +95,8 @@
   // gridDim should be convertible to dim3 (in fact in nvcc, it *is* a
   // dim3).  This function is defined after we pull in vector_types.h.
   __attribute__((device)) operator dim3() const;
+  __attribute__((device)) operator uint3() const;
+
 private:
   __CUDA_DISALLOW_BUILTINVAR_ACCESS(__cuda_builtin_gridDim_t);
 };
@@ -108,5 +116,6 @@
 #undef __CUDA_DEVICE_BUILTIN
 #undef __CUDA_BUILTIN_VAR
 #undef __CUDA_DISALLOW_BUILTINVAR_ACCESS
+#undef __DELETE
 
 #endif /* __CUDA_BUILTIN_VARS_H */
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_cmath.h b/linux-x86/lib64/clang/12.0.1/include/__clang_cuda_cmath.h
similarity index 90%
copy from darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_cmath.h
copy to linux-x86/lib64/clang/12.0.1/include/__clang_cuda_cmath.h
index 8ba1826..f49463d 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_cmath.h
+++ b/linux-x86/lib64/clang/12.0.1/include/__clang_cuda_cmath.h
@@ -66,10 +66,38 @@
 }
 
 // For inscrutable reasons, the CUDA headers define these functions for us on
-// Windows. For OpenMP we omit these as some old system headers have
-// non-conforming `isinf(float)` and `isnan(float)` implementations that return
-// an `int`. The system versions of these functions should be fine anyway.
-#if !defined(_MSC_VER) && !defined(__OPENMP_NVPTX__)
+// Windows.
+#if !defined(_MSC_VER) || defined(__OPENMP_NVPTX__)
+
+// For OpenMP we work around some old system headers that have non-conforming
+// `isinf(float)` and `isnan(float)` implementations that return an `int`. We do
+// this by providing two versions of these functions, differing only in the
+// return type. To avoid conflicting definitions we disable implicit base
+// function generation. That means we will end up with two specializations, one
+// per type, but only one has a base function defined by the system header.
+#if defined(__OPENMP_NVPTX__)
+#pragma omp begin declare variant match(                                       \
+    implementation = {extension(disable_implicit_base)})
+
+// FIXME: We lack an extension to customize the mangling of the variants, e.g.,
+//        add a suffix. This means we would clash with the names of the variants
+//        (note that we do not create implicit base functions here). To avoid
+//        this clash we add a new trait to some of them that is always true
+//        (this is LLVM after all ;)). It will only influence the mangled name
+//        of the variants inside the inner region and avoid the clash.
+#pragma omp begin declare variant match(implementation = {vendor(llvm)})
+
+__DEVICE__ int isinf(float __x) { return ::__isinff(__x); }
+__DEVICE__ int isinf(double __x) { return ::__isinf(__x); }
+__DEVICE__ int isfinite(float __x) { return ::__finitef(__x); }
+__DEVICE__ int isfinite(double __x) { return ::__isfinited(__x); }
+__DEVICE__ int isnan(float __x) { return ::__isnanf(__x); }
+__DEVICE__ int isnan(double __x) { return ::__isnan(__x); }
+
+#pragma omp end declare variant
+
+#endif
+
 __DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
 __DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
 __DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
@@ -79,6 +107,11 @@
 __DEVICE__ bool isfinite(double __x) { return ::__isfinited(__x); }
 __DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); }
 __DEVICE__ bool isnan(double __x) { return ::__isnan(__x); }
+
+#if defined(__OPENMP_NVPTX__)
+#pragma omp end declare variant
+#endif
+
 #endif
 
 __DEVICE__ bool isgreater(float __x, float __y) {
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_complex_builtins.h b/linux-x86/lib64/clang/12.0.1/include/__clang_cuda_complex_builtins.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/__clang_cuda_complex_builtins.h
rename to linux-x86/lib64/clang/12.0.1/include/__clang_cuda_complex_builtins.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_device_functions.h b/linux-x86/lib64/clang/12.0.1/include/__clang_cuda_device_functions.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/__clang_cuda_device_functions.h
rename to linux-x86/lib64/clang/12.0.1/include/__clang_cuda_device_functions.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_intrinsics.h b/linux-x86/lib64/clang/12.0.1/include/__clang_cuda_intrinsics.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/__clang_cuda_intrinsics.h
rename to linux-x86/lib64/clang/12.0.1/include/__clang_cuda_intrinsics.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_libdevice_declares.h b/linux-x86/lib64/clang/12.0.1/include/__clang_cuda_libdevice_declares.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/__clang_cuda_libdevice_declares.h
rename to linux-x86/lib64/clang/12.0.1/include/__clang_cuda_libdevice_declares.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_math.h b/linux-x86/lib64/clang/12.0.1/include/__clang_cuda_math.h
similarity index 97%
copy from darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_math.h
copy to linux-x86/lib64/clang/12.0.1/include/__clang_cuda_math.h
index 332e616..acb26ad 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/__clang_cuda_math.h
+++ b/linux-x86/lib64/clang/12.0.1/include/__clang_cuda_math.h
@@ -195,8 +195,8 @@
 __DEVICE__ int min(int __a, int __b) { return __nv_min(__a, __b); }
 __DEVICE__ double modf(double __a, double *__b) { return __nv_modf(__a, __b); }
 __DEVICE__ float modff(float __a, float *__b) { return __nv_modff(__a, __b); }
-__DEVICE__ double nearbyint(double __a) { return __nv_nearbyint(__a); }
-__DEVICE__ float nearbyintf(float __a) { return __nv_nearbyintf(__a); }
+__DEVICE__ double nearbyint(double __a) { return __builtin_nearbyint(__a); }
+__DEVICE__ float nearbyintf(float __a) { return __builtin_nearbyintf(__a); }
 __DEVICE__ double nextafter(double __a, double __b) {
   return __nv_nextafter(__a, __b);
 }
@@ -249,8 +249,9 @@
 __DEVICE__ float rhypotf(float __a, float __b) {
   return __nv_rhypotf(__a, __b);
 }
-__DEVICE__ double rint(double __a) { return __nv_rint(__a); }
-__DEVICE__ float rintf(float __a) { return __nv_rintf(__a); }
+// __nv_rint* in libdevice is buggy and produces incorrect results.
+__DEVICE__ double rint(double __a) { return __builtin_rint(__a); }
+__DEVICE__ float rintf(float __a) { return __builtin_rintf(__a); }
 __DEVICE__ double rnorm(int __a, const double *__b) {
   return __nv_rnorm(__a, __b);
 }
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_math_forward_declares.h b/linux-x86/lib64/clang/12.0.1/include/__clang_cuda_math_forward_declares.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/__clang_cuda_math_forward_declares.h
rename to linux-x86/lib64/clang/12.0.1/include/__clang_cuda_math_forward_declares.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_runtime_wrapper.h b/linux-x86/lib64/clang/12.0.1/include/__clang_cuda_runtime_wrapper.h
similarity index 97%
rename from linux-x86/lib64/clang/11.0.5/include/__clang_cuda_runtime_wrapper.h
rename to linux-x86/lib64/clang/12.0.1/include/__clang_cuda_runtime_wrapper.h
index f43ed55..f88c39a 100644
--- a/linux-x86/lib64/clang/11.0.5/include/__clang_cuda_runtime_wrapper.h
+++ b/linux-x86/lib64/clang/12.0.1/include/__clang_cuda_runtime_wrapper.h
@@ -377,30 +377,38 @@
 // Out-of-line implementations from __clang_cuda_builtin_vars.h.  These need to
 // come after we've pulled in the definition of uint3 and dim3.
 
+__device__ inline __cuda_builtin_threadIdx_t::operator dim3() const {
+  return dim3(x, y, z);
+}
+
 __device__ inline __cuda_builtin_threadIdx_t::operator uint3() const {
-  uint3 ret;
-  ret.x = x;
-  ret.y = y;
-  ret.z = z;
-  return ret;
+  return {x, y, z};
+}
+
+__device__ inline __cuda_builtin_blockIdx_t::operator dim3() const {
+  return dim3(x, y, z);
 }
 
 __device__ inline __cuda_builtin_blockIdx_t::operator uint3() const {
-  uint3 ret;
-  ret.x = x;
-  ret.y = y;
-  ret.z = z;
-  return ret;
+  return {x, y, z};
 }
 
 __device__ inline __cuda_builtin_blockDim_t::operator dim3() const {
   return dim3(x, y, z);
 }
 
+__device__ inline __cuda_builtin_blockDim_t::operator uint3() const {
+  return {x, y, z};
+}
+
 __device__ inline __cuda_builtin_gridDim_t::operator dim3() const {
   return dim3(x, y, z);
 }
 
+__device__ inline __cuda_builtin_gridDim_t::operator uint3() const {
+  return {x, y, z};
+}
+
 #include <__clang_cuda_cmath.h>
 #include <__clang_cuda_intrinsics.h>
 #include <__clang_cuda_complex_builtins.h>
diff --git a/linux-x86/lib64/clang/12.0.1/include/__clang_hip_cmath.h b/linux-x86/lib64/clang/12.0.1/include/__clang_hip_cmath.h
new file mode 100644
index 0000000..fea799e
--- /dev/null
+++ b/linux-x86/lib64/clang/12.0.1/include/__clang_hip_cmath.h
@@ -0,0 +1,521 @@
+/*===---- __clang_hip_cmath.h - HIP cmath decls -----------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_HIP_CMATH_H__
+#define __CLANG_HIP_CMATH_H__
+
+#if !defined(__HIP__)
+#error "This file is for HIP and OpenMP AMDGCN device compilation only."
+#endif
+
+#if defined(__cplusplus)
+#include <limits>
+#endif
+#include <limits.h>
+#include <stdint.h>
+
+#pragma push_macro("__DEVICE__")
+#define __DEVICE__ static __device__ inline __attribute__((always_inline))
+
+// Start with functions that cannot be defined by DEF macros below.
+#if defined(__cplusplus)
+__DEVICE__ double abs(double __x) { return ::fabs(__x); }
+__DEVICE__ float abs(float __x) { return ::fabsf(__x); }
+__DEVICE__ long long abs(long long __n) { return ::llabs(__n); }
+__DEVICE__ long abs(long __n) { return ::labs(__n); }
+__DEVICE__ float fma(float __x, float __y, float __z) {
+  return ::fmaf(__x, __y, __z);
+}
+__DEVICE__ int fpclassify(float __x) {
+  return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
+                              FP_ZERO, __x);
+}
+__DEVICE__ int fpclassify(double __x) {
+  return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
+                              FP_ZERO, __x);
+}
+__DEVICE__ float frexp(float __arg, int *__exp) {
+  return ::frexpf(__arg, __exp);
+}
+__DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
+__DEVICE__ bool isfinite(double __x) { return ::__finite(__x); }
+__DEVICE__ bool isgreater(float __x, float __y) {
+  return __builtin_isgreater(__x, __y);
+}
+__DEVICE__ bool isgreater(double __x, double __y) {
+  return __builtin_isgreater(__x, __y);
+}
+__DEVICE__ bool isgreaterequal(float __x, float __y) {
+  return __builtin_isgreaterequal(__x, __y);
+}
+__DEVICE__ bool isgreaterequal(double __x, double __y) {
+  return __builtin_isgreaterequal(__x, __y);
+}
+__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
+__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
+__DEVICE__ bool isless(float __x, float __y) {
+  return __builtin_isless(__x, __y);
+}
+__DEVICE__ bool isless(double __x, double __y) {
+  return __builtin_isless(__x, __y);
+}
+__DEVICE__ bool islessequal(float __x, float __y) {
+  return __builtin_islessequal(__x, __y);
+}
+__DEVICE__ bool islessequal(double __x, double __y) {
+  return __builtin_islessequal(__x, __y);
+}
+__DEVICE__ bool islessgreater(float __x, float __y) {
+  return __builtin_islessgreater(__x, __y);
+}
+__DEVICE__ bool islessgreater(double __x, double __y) {
+  return __builtin_islessgreater(__x, __y);
+}
+__DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); }
+__DEVICE__ bool isnan(double __x) { return ::__isnan(__x); }
+__DEVICE__ bool isnormal(float __x) { return __builtin_isnormal(__x); }
+__DEVICE__ bool isnormal(double __x) { return __builtin_isnormal(__x); }
+__DEVICE__ bool isunordered(float __x, float __y) {
+  return __builtin_isunordered(__x, __y);
+}
+__DEVICE__ bool isunordered(double __x, double __y) {
+  return __builtin_isunordered(__x, __y);
+}
+__DEVICE__ float modf(float __x, float *__iptr) { return ::modff(__x, __iptr); }
+__DEVICE__ float pow(float __base, int __iexp) {
+  return ::powif(__base, __iexp);
+}
+__DEVICE__ double pow(double __base, int __iexp) {
+  return ::powi(__base, __iexp);
+}
+__DEVICE__ float remquo(float __x, float __y, int *__quo) {
+  return ::remquof(__x, __y, __quo);
+}
+__DEVICE__ float scalbln(float __x, long int __n) {
+  return ::scalblnf(__x, __n);
+}
+__DEVICE__ bool signbit(float __x) { return ::__signbitf(__x); }
+__DEVICE__ bool signbit(double __x) { return ::__signbit(__x); }
+
+// Notably missing above is nexttoward.  We omit it because
+// ocml doesn't provide an implementation, and we don't want to be in the
+// business of implementing tricky libm functions in this header.
+
+// Other functions.
+__DEVICE__ _Float16 fma(_Float16 __x, _Float16 __y, _Float16 __z) {
+  return __ocml_fma_f16(__x, __y, __z);
+}
+__DEVICE__ _Float16 pow(_Float16 __base, int __iexp) {
+  return __ocml_pown_f16(__base, __iexp);
+}
+
+// BEGIN DEF_FUN and HIP_OVERLOAD
+
+// BEGIN DEF_FUN
+
+#pragma push_macro("__DEF_FUN1")
+#pragma push_macro("__DEF_FUN2")
+#pragma push_macro("__DEF_FUN2_FI")
+
+// Define cmath functions that take a float argument and return __retty.
+#define __DEF_FUN1(__retty, __func)                                            \
+  __DEVICE__                                                                   \
+  __retty __func(float __x) { return __func##f(__x); }
+
+// Define cmath functions that take two float arguments and return __retty.
+#define __DEF_FUN2(__retty, __func)                                            \
+  __DEVICE__                                                                   \
+  __retty __func(float __x, float __y) { return __func##f(__x, __y); }
+
+// Define cmath functions that take a float and an int argument and return __retty.
+#define __DEF_FUN2_FI(__retty, __func)                                         \
+  __DEVICE__                                                                   \
+  __retty __func(float __x, int __y) { return __func##f(__x, __y); }
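+
+// For illustration only (not part of the header): __DEF_FUN1(float, sin)
+// expands to
+//   __DEVICE__ float sin(float __x) { return sinf(__x); }
+// i.e. each float overload defined below simply forwards to the "f"-suffixed
+// C function of the same name.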
+
+__DEF_FUN1(float, acos)
+__DEF_FUN1(float, acosh)
+__DEF_FUN1(float, asin)
+__DEF_FUN1(float, asinh)
+__DEF_FUN1(float, atan)
+__DEF_FUN2(float, atan2)
+__DEF_FUN1(float, atanh)
+__DEF_FUN1(float, cbrt)
+__DEF_FUN1(float, ceil)
+__DEF_FUN2(float, copysign)
+__DEF_FUN1(float, cos)
+__DEF_FUN1(float, cosh)
+__DEF_FUN1(float, erf)
+__DEF_FUN1(float, erfc)
+__DEF_FUN1(float, exp)
+__DEF_FUN1(float, exp2)
+__DEF_FUN1(float, expm1)
+__DEF_FUN1(float, fabs)
+__DEF_FUN2(float, fdim)
+__DEF_FUN1(float, floor)
+__DEF_FUN2(float, fmax)
+__DEF_FUN2(float, fmin)
+__DEF_FUN2(float, fmod)
+__DEF_FUN2(float, hypot)
+__DEF_FUN1(int, ilogb)
+__DEF_FUN2_FI(float, ldexp)
+__DEF_FUN1(float, lgamma)
+__DEF_FUN1(float, log)
+__DEF_FUN1(float, log10)
+__DEF_FUN1(float, log1p)
+__DEF_FUN1(float, log2)
+__DEF_FUN1(float, logb)
+__DEF_FUN1(long long, llrint)
+__DEF_FUN1(long long, llround)
+__DEF_FUN1(long, lrint)
+__DEF_FUN1(long, lround)
+__DEF_FUN1(float, nearbyint)
+__DEF_FUN2(float, nextafter)
+__DEF_FUN2(float, pow)
+__DEF_FUN2(float, remainder)
+__DEF_FUN1(float, rint)
+__DEF_FUN1(float, round)
+__DEF_FUN2_FI(float, scalbn)
+__DEF_FUN1(float, sin)
+__DEF_FUN1(float, sinh)
+__DEF_FUN1(float, sqrt)
+__DEF_FUN1(float, tan)
+__DEF_FUN1(float, tanh)
+__DEF_FUN1(float, tgamma)
+__DEF_FUN1(float, trunc)
+
+#pragma pop_macro("__DEF_FUN1")
+#pragma pop_macro("__DEF_FUN2")
+#pragma pop_macro("__DEF_FUN2_FI")
+
+// END DEF_FUN
+
+// BEGIN HIP_OVERLOAD
+
+#pragma push_macro("__HIP_OVERLOAD1")
+#pragma push_macro("__HIP_OVERLOAD2")
+
+// __hip_enable_if::type is a type function which returns __T if __B is true.
+template <bool __B, class __T = void> struct __hip_enable_if {};
+
+template <class __T> struct __hip_enable_if<true, __T> { typedef __T type; };
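+// For illustration: __hip_enable_if<true, double>::type is double, while
+// __hip_enable_if<false, double> has no member type, so a template that
+// names it drops out of overload resolution (SFINAE) instead of erroring.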
+
+// __HIP_OVERLOAD1 is used to resolve function calls with an integer argument
+// to avoid a compilation error due to ambiguity, e.g. floor(5) is resolved
+// with floor(double).
+#define __HIP_OVERLOAD1(__retty, __fn)                                         \
+  template <typename __T>                                                      \
+  __DEVICE__ typename __hip_enable_if<std::numeric_limits<__T>::is_integer,    \
+                                      __retty>::type                           \
+  __fn(__T __x) {                                                              \
+    return ::__fn((double)__x);                                                \
+  }
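+
+// For illustration only: __HIP_OVERLOAD1(double, floor) expands to a template
+// that participates in overload resolution only for integer __T, so a call
+// like floor(5) forwards to ::floor(5.0) instead of being ambiguous between
+// the float and double overloads.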
+
+// __HIP_OVERLOAD2 is used to resolve function calls with mixed float/double
+// or integer arguments to avoid a compilation error due to ambiguity, e.g.
+// max(5.0f, 6.0) is resolved with max(double, double).
+#define __HIP_OVERLOAD2(__retty, __fn)                                         \
+  template <typename __T1, typename __T2>                                      \
+  __DEVICE__                                                                   \
+      typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&    \
+                                   std::numeric_limits<__T2>::is_specialized,  \
+                               __retty>::type                                  \
+      __fn(__T1 __x, __T2 __y) {                                               \
+    return __fn((double)__x, (double)__y);                                     \
+  }
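+
+// For illustration only: with __HIP_OVERLOAD2(double, max) below, a mixed
+// call such as max(5.0f, 6.0) matches the template (both operand types have
+// std::numeric_limits specializations) and forwards to max(double, double).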
+
+__HIP_OVERLOAD1(double, abs)
+__HIP_OVERLOAD1(double, acos)
+__HIP_OVERLOAD1(double, acosh)
+__HIP_OVERLOAD1(double, asin)
+__HIP_OVERLOAD1(double, asinh)
+__HIP_OVERLOAD1(double, atan)
+__HIP_OVERLOAD2(double, atan2)
+__HIP_OVERLOAD1(double, atanh)
+__HIP_OVERLOAD1(double, cbrt)
+__HIP_OVERLOAD1(double, ceil)
+__HIP_OVERLOAD2(double, copysign)
+__HIP_OVERLOAD1(double, cos)
+__HIP_OVERLOAD1(double, cosh)
+__HIP_OVERLOAD1(double, erf)
+__HIP_OVERLOAD1(double, erfc)
+__HIP_OVERLOAD1(double, exp)
+__HIP_OVERLOAD1(double, exp2)
+__HIP_OVERLOAD1(double, expm1)
+__HIP_OVERLOAD1(double, fabs)
+__HIP_OVERLOAD2(double, fdim)
+__HIP_OVERLOAD1(double, floor)
+__HIP_OVERLOAD2(double, fmax)
+__HIP_OVERLOAD2(double, fmin)
+__HIP_OVERLOAD2(double, fmod)
+__HIP_OVERLOAD1(int, fpclassify)
+__HIP_OVERLOAD2(double, hypot)
+__HIP_OVERLOAD1(int, ilogb)
+__HIP_OVERLOAD1(bool, isfinite)
+__HIP_OVERLOAD2(bool, isgreater)
+__HIP_OVERLOAD2(bool, isgreaterequal)
+__HIP_OVERLOAD1(bool, isinf)
+__HIP_OVERLOAD2(bool, isless)
+__HIP_OVERLOAD2(bool, islessequal)
+__HIP_OVERLOAD2(bool, islessgreater)
+__HIP_OVERLOAD1(bool, isnan)
+__HIP_OVERLOAD1(bool, isnormal)
+__HIP_OVERLOAD2(bool, isunordered)
+__HIP_OVERLOAD1(double, lgamma)
+__HIP_OVERLOAD1(double, log)
+__HIP_OVERLOAD1(double, log10)
+__HIP_OVERLOAD1(double, log1p)
+__HIP_OVERLOAD1(double, log2)
+__HIP_OVERLOAD1(double, logb)
+__HIP_OVERLOAD1(long long, llrint)
+__HIP_OVERLOAD1(long long, llround)
+__HIP_OVERLOAD1(long, lrint)
+__HIP_OVERLOAD1(long, lround)
+__HIP_OVERLOAD1(double, nearbyint)
+__HIP_OVERLOAD2(double, nextafter)
+__HIP_OVERLOAD2(double, pow)
+__HIP_OVERLOAD2(double, remainder)
+__HIP_OVERLOAD1(double, rint)
+__HIP_OVERLOAD1(double, round)
+__HIP_OVERLOAD1(bool, signbit)
+__HIP_OVERLOAD1(double, sin)
+__HIP_OVERLOAD1(double, sinh)
+__HIP_OVERLOAD1(double, sqrt)
+__HIP_OVERLOAD1(double, tan)
+__HIP_OVERLOAD1(double, tanh)
+__HIP_OVERLOAD1(double, tgamma)
+__HIP_OVERLOAD1(double, trunc)
+
+// Overload these but don't add them to std; they are not part of cmath.
+__HIP_OVERLOAD2(double, max)
+__HIP_OVERLOAD2(double, min)
+
+// Additional Overloads that don't quite match HIP_OVERLOAD.
+template <typename __T1, typename __T2, typename __T3>
+__DEVICE__
+    typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&
+                                 std::numeric_limits<__T2>::is_specialized &&
+                                 std::numeric_limits<__T3>::is_specialized,
+                             double>::type
+    fma(__T1 __x, __T2 __y, __T3 __z) {
+  return ::fma((double)__x, (double)__y, (double)__z);
+}
+
+template <typename __T>
+__DEVICE__
+    typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+    frexp(__T __x, int *__exp) {
+  return ::frexp((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__
+    typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+    ldexp(__T __x, int __exp) {
+  return ::ldexp((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__
+    typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+    modf(__T __x, double *__exp) {
+  return ::modf((double)__x, __exp);
+}
+
+template <typename __T1, typename __T2>
+__DEVICE__
+    typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&
+                                 std::numeric_limits<__T2>::is_specialized,
+                             double>::type
+    remquo(__T1 __x, __T2 __y, int *__quo) {
+  return ::remquo((double)__x, (double)__y, __quo);
+}
+
+template <typename __T>
+__DEVICE__
+    typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+    scalbln(__T __x, long int __exp) {
+  return ::scalbln((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__
+    typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+    scalbn(__T __x, int __exp) {
+  return ::scalbn((double)__x, __exp);
+}
+
+#pragma pop_macro("__HIP_OVERLOAD1")
+#pragma pop_macro("__HIP_OVERLOAD2")
+
+// END HIP_OVERLOAD
+
+// END DEF_FUN and HIP_OVERLOAD
+
+#endif // defined(__cplusplus)
+
+// Define these overloads inside the namespace our standard library uses.
+#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
+_LIBCPP_BEGIN_NAMESPACE_STD
+#else
+namespace std {
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+#endif
+#endif
+
+// Pull the new overloads we defined above into namespace std.
+// using ::abs; - This may be considered for C++.
+using ::acos;
+using ::acosh;
+using ::asin;
+using ::asinh;
+using ::atan;
+using ::atan2;
+using ::atanh;
+using ::cbrt;
+using ::ceil;
+using ::copysign;
+using ::cos;
+using ::cosh;
+using ::erf;
+using ::erfc;
+using ::exp;
+using ::exp2;
+using ::expm1;
+using ::fabs;
+using ::fdim;
+using ::floor;
+using ::fma;
+using ::fmax;
+using ::fmin;
+using ::fmod;
+using ::fpclassify;
+using ::frexp;
+using ::hypot;
+using ::ilogb;
+using ::isfinite;
+using ::isgreater;
+using ::isgreaterequal;
+using ::isless;
+using ::islessequal;
+using ::islessgreater;
+using ::isnormal;
+using ::isunordered;
+using ::ldexp;
+using ::lgamma;
+using ::llrint;
+using ::llround;
+using ::log;
+using ::log10;
+using ::log1p;
+using ::log2;
+using ::logb;
+using ::lrint;
+using ::lround;
+using ::modf;
+// using ::nan; - This may be considered for C++.
+// using ::nanf; - This may be considered for C++.
+// using ::nanl; - This is not yet defined.
+using ::nearbyint;
+using ::nextafter;
+// using ::nexttoward; - Omit this since we do not have a definition.
+using ::pow;
+using ::remainder;
+using ::remquo;
+using ::rint;
+using ::round;
+using ::scalbln;
+using ::scalbn;
+using ::signbit;
+using ::sin;
+using ::sinh;
+using ::sqrt;
+using ::tan;
+using ::tanh;
+using ::tgamma;
+using ::trunc;
+
+// Well this is fun: We need to pull these symbols in for libc++, but we can't
+// pull them in with libstdc++, because its ::isinf and ::isnan are different
+// than its std::isinf and std::isnan.
+#ifndef __GLIBCXX__
+using ::isinf;
+using ::isnan;
+#endif
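+// For example, with libstdc++ the C library may declare int isinf(double),
+// while C++11's std::isinf(double) returns bool; a blanket `using ::isinf;`
+// here would pull the int-returning overload into namespace std.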
+
+// Finally, pull the "foobarf" functions that HIP defines into std.
+using ::acosf;
+using ::acoshf;
+using ::asinf;
+using ::asinhf;
+using ::atan2f;
+using ::atanf;
+using ::atanhf;
+using ::cbrtf;
+using ::ceilf;
+using ::copysignf;
+using ::cosf;
+using ::coshf;
+using ::erfcf;
+using ::erff;
+using ::exp2f;
+using ::expf;
+using ::expm1f;
+using ::fabsf;
+using ::fdimf;
+using ::floorf;
+using ::fmaf;
+using ::fmaxf;
+using ::fminf;
+using ::fmodf;
+using ::frexpf;
+using ::hypotf;
+using ::ilogbf;
+using ::ldexpf;
+using ::lgammaf;
+using ::llrintf;
+using ::llroundf;
+using ::log10f;
+using ::log1pf;
+using ::log2f;
+using ::logbf;
+using ::logf;
+using ::lrintf;
+using ::lroundf;
+using ::modff;
+using ::nearbyintf;
+using ::nextafterf;
+// using ::nexttowardf; - Omit this since we do not have a definition.
+using ::powf;
+using ::remainderf;
+using ::remquof;
+using ::rintf;
+using ::roundf;
+using ::scalblnf;
+using ::scalbnf;
+using ::sinf;
+using ::sinhf;
+using ::sqrtf;
+using ::tanf;
+using ::tanhf;
+using ::tgammaf;
+using ::truncf;
+
+#ifdef _LIBCPP_END_NAMESPACE_STD
+_LIBCPP_END_NAMESPACE_STD
+#else
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_END_NAMESPACE_VERSION
+#endif
+} // namespace std
+#endif
+
+#pragma pop_macro("__DEVICE__")
+
+#endif // __CLANG_HIP_CMATH_H__
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_libdevice_declares.h b/linux-x86/lib64/clang/12.0.1/include/__clang_hip_libdevice_declares.h
similarity index 97%
copy from darwin-x86/lib64/clang/11.0.5/include/__clang_hip_libdevice_declares.h
copy to linux-x86/lib64/clang/12.0.1/include/__clang_hip_libdevice_declares.h
index e1cd49a..ac98907 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_libdevice_declares.h
+++ b/linux-x86/lib64/clang/12.0.1/include/__clang_hip_libdevice_declares.h
@@ -10,7 +10,9 @@
 #ifndef __CLANG_HIP_LIBDEVICE_DECLARES_H__
 #define __CLANG_HIP_LIBDEVICE_DECLARES_H__
 
+#ifdef __cplusplus
 extern "C" {
+#endif
 
 // BEGIN FLOAT
 __device__ __attribute__((const)) float __ocml_acos_f32(float);
@@ -78,6 +80,7 @@
 __device__ __attribute__((pure)) float __ocml_ncdf_f32(float);
 __device__ __attribute__((pure)) float __ocml_ncdfinv_f32(float);
 __device__ __attribute__((pure)) float __ocml_pow_f32(float, float);
+__device__ __attribute__((pure)) float __ocml_pown_f32(float, int);
 __device__ __attribute__((pure)) float __ocml_rcbrt_f32(float);
 __device__ __attribute__((const)) float __ocml_remainder_f32(float, float);
 __device__ float __ocml_remquo_f32(float, float,
@@ -126,10 +129,10 @@
 __device__ __attribute__((const)) float __ocml_div_rtn_f32(float, float);
 __device__ __attribute__((const)) float __ocml_div_rtp_f32(float, float);
 __device__ __attribute__((const)) float __ocml_div_rtz_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sqrt_rte_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sqrt_rtn_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sqrt_rtp_f32(float, float);
-__device__ __attribute__((const)) float __ocml_sqrt_rtz_f32(float, float);
+__device__ __attribute__((const)) float __ocml_sqrt_rte_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtn_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtp_f32(float);
+__device__ __attribute__((const)) float __ocml_sqrt_rtz_f32(float);
 __device__ __attribute__((const)) float __ocml_fma_rte_f32(float, float, float);
 __device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float);
 __device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float);
@@ -205,6 +208,7 @@
 __device__ __attribute__((pure)) double __ocml_ncdf_f64(double);
 __device__ __attribute__((pure)) double __ocml_ncdfinv_f64(double);
 __device__ __attribute__((pure)) double __ocml_pow_f64(double, double);
+__device__ __attribute__((pure)) double __ocml_pown_f64(double, int);
 __device__ __attribute__((pure)) double __ocml_rcbrt_f64(double);
 __device__ __attribute__((const)) double __ocml_remainder_f64(double, double);
 __device__ double __ocml_remquo_f64(double, double,
@@ -252,10 +256,10 @@
 __device__ __attribute__((const)) double __ocml_div_rtn_f64(double, double);
 __device__ __attribute__((const)) double __ocml_div_rtp_f64(double, double);
 __device__ __attribute__((const)) double __ocml_div_rtz_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sqrt_rte_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sqrt_rtn_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sqrt_rtp_f64(double, double);
-__device__ __attribute__((const)) double __ocml_sqrt_rtz_f64(double, double);
+__device__ __attribute__((const)) double __ocml_sqrt_rte_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtn_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtp_f64(double);
+__device__ __attribute__((const)) double __ocml_sqrt_rtz_f64(double);
 __device__ __attribute__((const)) double __ocml_fma_rte_f64(double, double,
                                                             double);
 __device__ __attribute__((const)) double __ocml_fma_rtn_f64(double, double,
@@ -290,6 +294,7 @@
 __device__ _Float16 __ocml_sin_f16(_Float16);
 __device__ __attribute__((const)) _Float16 __ocml_sqrt_f16(_Float16);
 __device__ __attribute__((const)) _Float16 __ocml_trunc_f16(_Float16);
+__device__ __attribute__((pure)) _Float16 __ocml_pown_f16(_Float16, int);
 
 typedef _Float16 __2f16 __attribute__((ext_vector_type(2)));
 typedef short __2i16 __attribute__((ext_vector_type(2)));
@@ -313,14 +318,17 @@
 __device__ inline __2f16
 __llvm_amdgcn_rcp_2f16(__2f16 __x) // Not currently exposed by ROCDL.
 {
-  return __2f16{__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y)};
+  return (__2f16)(__llvm_amdgcn_rcp_f16(__x.x), __llvm_amdgcn_rcp_f16(__x.y));
 }
 __device__ __attribute__((const)) __2f16 __ocml_rint_2f16(__2f16);
 __device__ __attribute__((const)) __2f16 __ocml_rsqrt_2f16(__2f16);
 __device__ __2f16 __ocml_sin_2f16(__2f16);
 __device__ __attribute__((const)) __2f16 __ocml_sqrt_2f16(__2f16);
 __device__ __attribute__((const)) __2f16 __ocml_trunc_2f16(__2f16);
+__device__ __attribute__((const)) __2f16 __ocml_pown_2f16(__2f16, __2i16);
 
+#ifdef __cplusplus
 } // extern "C"
+#endif
 
 #endif // __CLANG_HIP_LIBDEVICE_DECLARES_H__
diff --git a/linux-x86/lib64/clang/12.0.1/include/__clang_hip_math.h b/linux-x86/lib64/clang/12.0.1/include/__clang_hip_math.h
new file mode 100644
index 0000000..f2365e8
--- /dev/null
+++ b/linux-x86/lib64/clang/12.0.1/include/__clang_hip_math.h
@@ -0,0 +1,1246 @@
+/*===---- __clang_hip_math.h - Device-side HIP math support ----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __CLANG_HIP_MATH_H__
+#define __CLANG_HIP_MATH_H__
+
+#if !defined(__HIP__)
+#error "This file is for HIP and OpenMP AMDGCN device compilation only."
+#endif
+
+#if defined(__cplusplus)
+#include <algorithm>
+#endif
+#include <limits.h>
+#include <stdint.h>
+
+#pragma push_macro("__DEVICE__")
+#define __DEVICE__ static __device__ inline __attribute__((always_inline))
+
+// A few functions return bool in C++ mode but int when this header is
+// compiled as C.
+#pragma push_macro("__RETURN_TYPE")
+#if defined(__cplusplus)
+#define __RETURN_TYPE bool
+#else
+#define __RETURN_TYPE int
+#endif
+
+#if defined(__cplusplus) && __cplusplus < 201103L
+// Emulate static_assert on type sizes.
+template<bool>
+struct __compare_result{};
+template<>
+struct __compare_result<true> {
+  static const bool valid;
+};
+
+__DEVICE__
+void __suppress_unused_warning(bool b) {}
+template <unsigned int S, unsigned int T>
+__DEVICE__ void __static_assert_equal_size() {
+  __suppress_unused_warning(__compare_result<S == T>::valid);
+}
+
+#define __static_assert_type_size_equal(A, B) \
+  __static_assert_equal_size<A,B>()
+
+#else
+#define __static_assert_type_size_equal(A,B) \
+  static_assert((A) == (B), "")
+
+#endif
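+
+// For illustration only: __static_assert_type_size_equal(4, 4) instantiates
+// __static_assert_equal_size<4, 4>, which names __compare_result<true>::valid
+// and compiles; with unequal sizes it names __compare_result<false>::valid,
+// which does not exist, so pre-C++11 builds fail at compile time as intended.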
+
+__DEVICE__
+uint64_t __make_mantissa_base8(const char *__tagp) {
+  uint64_t __r = 0;
+  while (*__tagp != '\0') {
+    char __tmp = *__tagp;
+
+    if (__tmp >= '0' && __tmp <= '7')
+      __r = (__r * 8u) + __tmp - '0';
+    else
+      return 0;
+
+    ++__tagp;
+  }
+
+  return __r;
+}
+
+__DEVICE__
+uint64_t __make_mantissa_base10(const char *__tagp) {
+  uint64_t __r = 0;
+  while (*__tagp != '\0') {
+    char __tmp = *__tagp;
+
+    if (__tmp >= '0' && __tmp <= '9')
+      __r = (__r * 10u) + __tmp - '0';
+    else
+      return 0;
+
+    ++__tagp;
+  }
+
+  return __r;
+}
+
+__DEVICE__
+uint64_t __make_mantissa_base16(const char *__tagp) {
+  uint64_t __r = 0;
+  while (*__tagp != '\0') {
+    char __tmp = *__tagp;
+
+    if (__tmp >= '0' && __tmp <= '9')
+      __r = (__r * 16u) + __tmp - '0';
+    else if (__tmp >= 'a' && __tmp <= 'f')
+      __r = (__r * 16u) + __tmp - 'a' + 10;
+    else if (__tmp >= 'A' && __tmp <= 'F')
+      __r = (__r * 16u) + __tmp - 'A' + 10;
+    else
+      return 0;
+
+    ++__tagp;
+  }
+
+  return __r;
+}
+
+__DEVICE__
+uint64_t __make_mantissa(const char *__tagp) {
+  if (!__tagp)
+    return 0u;
+
+  if (*__tagp == '0') {
+    ++__tagp;
+
+    if (*__tagp == 'x' || *__tagp == 'X')
+      return __make_mantissa_base16(__tagp + 1); // Skip the 'x'/'X' prefix.
+    else
+      return __make_mantissa_base8(__tagp);
+  }
+
+  return __make_mantissa_base10(__tagp);
+}
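+
+// For illustration only: __make_mantissa("0x1c") yields 28 via the base-16
+// path, "017" yields 15 via the base-8 path, and "17" yields 17 via base 10.
+// nanf() and nan() below use this value to seed the NaN payload bits.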
+
+// BEGIN FLOAT
+#if defined(__cplusplus)
+__DEVICE__
+int abs(int __x) {
+  int __sgn = __x >> (sizeof(int) * CHAR_BIT - 1);
+  return (__x ^ __sgn) - __sgn;
+}
+__DEVICE__
+long labs(long __x) {
+  long __sgn = __x >> (sizeof(long) * CHAR_BIT - 1);
+  return (__x ^ __sgn) - __sgn;
+}
+__DEVICE__
+long long llabs(long long __x) {
+  long long __sgn = __x >> (sizeof(long long) * CHAR_BIT - 1);
+  return (__x ^ __sgn) - __sgn;
+}
+#endif
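+
+// The overloads above use the classic branchless absolute-value idiom:
+// __sgn is all-ones for negative inputs and zero otherwise, so
+// (__x ^ __sgn) - __sgn flips the bits and adds one exactly when __x < 0.
+// (As with the standard abs functions, the minimum value has no
+// representable absolute value.)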
+
+__DEVICE__
+float acosf(float __x) { return __ocml_acos_f32(__x); }
+
+__DEVICE__
+float acoshf(float __x) { return __ocml_acosh_f32(__x); }
+
+__DEVICE__
+float asinf(float __x) { return __ocml_asin_f32(__x); }
+
+__DEVICE__
+float asinhf(float __x) { return __ocml_asinh_f32(__x); }
+
+__DEVICE__
+float atan2f(float __x, float __y) { return __ocml_atan2_f32(__x, __y); }
+
+__DEVICE__
+float atanf(float __x) { return __ocml_atan_f32(__x); }
+
+__DEVICE__
+float atanhf(float __x) { return __ocml_atanh_f32(__x); }
+
+__DEVICE__
+float cbrtf(float __x) { return __ocml_cbrt_f32(__x); }
+
+__DEVICE__
+float ceilf(float __x) { return __ocml_ceil_f32(__x); }
+
+__DEVICE__
+float copysignf(float __x, float __y) { return __ocml_copysign_f32(__x, __y); }
+
+__DEVICE__
+float cosf(float __x) { return __ocml_cos_f32(__x); }
+
+__DEVICE__
+float coshf(float __x) { return __ocml_cosh_f32(__x); }
+
+__DEVICE__
+float cospif(float __x) { return __ocml_cospi_f32(__x); }
+
+__DEVICE__
+float cyl_bessel_i0f(float __x) { return __ocml_i0_f32(__x); }
+
+__DEVICE__
+float cyl_bessel_i1f(float __x) { return __ocml_i1_f32(__x); }
+
+__DEVICE__
+float erfcf(float __x) { return __ocml_erfc_f32(__x); }
+
+__DEVICE__
+float erfcinvf(float __x) { return __ocml_erfcinv_f32(__x); }
+
+__DEVICE__
+float erfcxf(float __x) { return __ocml_erfcx_f32(__x); }
+
+__DEVICE__
+float erff(float __x) { return __ocml_erf_f32(__x); }
+
+__DEVICE__
+float erfinvf(float __x) { return __ocml_erfinv_f32(__x); }
+
+__DEVICE__
+float exp10f(float __x) { return __ocml_exp10_f32(__x); }
+
+__DEVICE__
+float exp2f(float __x) { return __ocml_exp2_f32(__x); }
+
+__DEVICE__
+float expf(float __x) { return __ocml_exp_f32(__x); }
+
+__DEVICE__
+float expm1f(float __x) { return __ocml_expm1_f32(__x); }
+
+__DEVICE__
+float fabsf(float __x) { return __ocml_fabs_f32(__x); }
+
+__DEVICE__
+float fdimf(float __x, float __y) { return __ocml_fdim_f32(__x, __y); }
+
+__DEVICE__
+float fdividef(float __x, float __y) { return __x / __y; }
+
+__DEVICE__
+float floorf(float __x) { return __ocml_floor_f32(__x); }
+
+__DEVICE__
+float fmaf(float __x, float __y, float __z) {
+  return __ocml_fma_f32(__x, __y, __z);
+}
+
+__DEVICE__
+float fmaxf(float __x, float __y) { return __ocml_fmax_f32(__x, __y); }
+
+__DEVICE__
+float fminf(float __x, float __y) { return __ocml_fmin_f32(__x, __y); }
+
+__DEVICE__
+float fmodf(float __x, float __y) { return __ocml_fmod_f32(__x, __y); }
+
+__DEVICE__
+float frexpf(float __x, int *__nptr) {
+  int __tmp;
+  float __r =
+      __ocml_frexp_f32(__x, (__attribute__((address_space(5))) int *)&__tmp);
+  *__nptr = __tmp;
+
+  return __r;
+}
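+
+// Note on the cast above: address space 5 is the AMDGPU private (stack)
+// address space. OCML expects a private pointer for its out-parameters, so
+// these wrappers spill to a local temporary and then copy the result to the
+// generic pointer the caller provided. The same pattern recurs in modff,
+// remquof, sincosf and sincospif below.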
+
+__DEVICE__
+float hypotf(float __x, float __y) { return __ocml_hypot_f32(__x, __y); }
+
+__DEVICE__
+int ilogbf(float __x) { return __ocml_ilogb_f32(__x); }
+
+__DEVICE__
+__RETURN_TYPE __finitef(float __x) { return __ocml_isfinite_f32(__x); }
+
+__DEVICE__
+__RETURN_TYPE __isinff(float __x) { return __ocml_isinf_f32(__x); }
+
+__DEVICE__
+__RETURN_TYPE __isnanf(float __x) { return __ocml_isnan_f32(__x); }
+
+__DEVICE__
+float j0f(float __x) { return __ocml_j0_f32(__x); }
+
+__DEVICE__
+float j1f(float __x) { return __ocml_j1_f32(__x); }
+
+__DEVICE__
+float jnf(int __n, float __x) {
+  // TODO: we could use Ahmes multiplication and the Miller & Brown algorithm
+  // for linear recurrences to get O(log n) steps, but it's unclear if it'd be
+  // beneficial in this case.
+  if (__n == 0)
+    return j0f(__x);
+  if (__n == 1)
+    return j1f(__x);
+
+  float __x0 = j0f(__x);
+  float __x1 = j1f(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    float __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+
+  return __x1;
+}
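+
+// The loop above applies the standard Bessel upward recurrence
+//   J_{n+1}(x) = (2n/x) * J_n(x) - J_{n-1}(x),
+// starting from the j0f/j1f base cases; ynf and jn below reuse the same
+// recurrence with their respective base cases.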
+
+__DEVICE__
+float ldexpf(float __x, int __e) { return __ocml_ldexp_f32(__x, __e); }
+
+__DEVICE__
+float lgammaf(float __x) { return __ocml_lgamma_f32(__x); }
+
+__DEVICE__
+long long int llrintf(float __x) { return __ocml_rint_f32(__x); }
+
+__DEVICE__
+long long int llroundf(float __x) { return __ocml_round_f32(__x); }
+
+__DEVICE__
+float log10f(float __x) { return __ocml_log10_f32(__x); }
+
+__DEVICE__
+float log1pf(float __x) { return __ocml_log1p_f32(__x); }
+
+__DEVICE__
+float log2f(float __x) { return __ocml_log2_f32(__x); }
+
+__DEVICE__
+float logbf(float __x) { return __ocml_logb_f32(__x); }
+
+__DEVICE__
+float logf(float __x) { return __ocml_log_f32(__x); }
+
+__DEVICE__
+long int lrintf(float __x) { return __ocml_rint_f32(__x); }
+
+__DEVICE__
+long int lroundf(float __x) { return __ocml_round_f32(__x); }
+
+__DEVICE__
+float modff(float __x, float *__iptr) {
+  float __tmp;
+  float __r =
+      __ocml_modf_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
+  *__iptr = __tmp;
+  return __r;
+}
+
+__DEVICE__
+float nanf(const char *__tagp) {
+  union {
+    float val;
+    struct ieee_float {
+      unsigned int mantissa : 22;
+      unsigned int quiet : 1;
+      unsigned int exponent : 8;
+      unsigned int sign : 1;
+    } bits;
+  } __tmp;
+  __static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits));
+
+  __tmp.bits.sign = 0u;
+  __tmp.bits.exponent = ~0u;
+  __tmp.bits.quiet = 1u;
+  __tmp.bits.mantissa = __make_mantissa(__tagp);
+
+  return __tmp.val;
+}
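+
+// For illustration only: the union above spells out the IEEE-754 binary32
+// layout (1 sign bit, 8 exponent bits, 23 significand bits, with the top
+// significand bit used as the quiet flag), so nanf("0x3") returns a quiet
+// NaN whose low payload bits are 3.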
+
+__DEVICE__
+float nearbyintf(float __x) { return __ocml_nearbyint_f32(__x); }
+
+__DEVICE__
+float nextafterf(float __x, float __y) {
+  return __ocml_nextafter_f32(__x, __y);
+}
+
+__DEVICE__
+float norm3df(float __x, float __y, float __z) {
+  return __ocml_len3_f32(__x, __y, __z);
+}
+
+__DEVICE__
+float norm4df(float __x, float __y, float __z, float __w) {
+  return __ocml_len4_f32(__x, __y, __z, __w);
+}
+
+__DEVICE__
+float normcdff(float __x) { return __ocml_ncdf_f32(__x); }
+
+__DEVICE__
+float normcdfinvf(float __x) { return __ocml_ncdfinv_f32(__x); }
+
+__DEVICE__
+float normf(int __dim,
+            const float *__a) { // TODO: placeholder until OCML adds support.
+  float __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_sqrt_f32(__r);
+}
+
+__DEVICE__
+float powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+
+__DEVICE__
+float powif(float __x, int __y) { return __ocml_pown_f32(__x, __y); }
+
+__DEVICE__
+float rcbrtf(float __x) { return __ocml_rcbrt_f32(__x); }
+
+__DEVICE__
+float remainderf(float __x, float __y) {
+  return __ocml_remainder_f32(__x, __y);
+}
+
+__DEVICE__
+float remquof(float __x, float __y, int *__quo) {
+  int __tmp;
+  float __r = __ocml_remquo_f32(
+      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
+  *__quo = __tmp;
+
+  return __r;
+}
+
+__DEVICE__
+float rhypotf(float __x, float __y) { return __ocml_rhypot_f32(__x, __y); }
+
+__DEVICE__
+float rintf(float __x) { return __ocml_rint_f32(__x); }
+
+__DEVICE__
+float rnorm3df(float __x, float __y, float __z) {
+  return __ocml_rlen3_f32(__x, __y, __z);
+}
+
+__DEVICE__
+float rnorm4df(float __x, float __y, float __z, float __w) {
+  return __ocml_rlen4_f32(__x, __y, __z, __w);
+}
+
+__DEVICE__
+float rnormf(int __dim,
+             const float *__a) { // TODO: placeholder until OCML adds support.
+  float __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_rsqrt_f32(__r);
+}
+
+__DEVICE__
+float roundf(float __x) { return __ocml_round_f32(__x); }
+
+__DEVICE__
+float rsqrtf(float __x) { return __ocml_rsqrt_f32(__x); }
+
+__DEVICE__
+float scalblnf(float __x, long int __n) {
+  return (__n < INT_MAX) ? __ocml_scalbn_f32(__x, __n)
+                         : __ocml_scalb_f32(__x, __n);
+}
+
+__DEVICE__
+float scalbnf(float __x, int __n) { return __ocml_scalbn_f32(__x, __n); }
+
+__DEVICE__
+__RETURN_TYPE __signbitf(float __x) { return __ocml_signbit_f32(__x); }
+
+__DEVICE__
+void sincosf(float __x, float *__sinptr, float *__cosptr) {
+  float __tmp;
+  *__sinptr =
+      __ocml_sincos_f32(__x, (__attribute__((address_space(5))) float *)&__tmp);
+  *__cosptr = __tmp;
+}
+
+__DEVICE__
+void sincospif(float __x, float *__sinptr, float *__cosptr) {
+  float __tmp;
+  *__sinptr = __ocml_sincospi_f32(
+      __x, (__attribute__((address_space(5))) float *)&__tmp);
+  *__cosptr = __tmp;
+}
+
+__DEVICE__
+float sinf(float __x) { return __ocml_sin_f32(__x); }
+
+__DEVICE__
+float sinhf(float __x) { return __ocml_sinh_f32(__x); }
+
+__DEVICE__
+float sinpif(float __x) { return __ocml_sinpi_f32(__x); }
+
+__DEVICE__
+float sqrtf(float __x) { return __ocml_sqrt_f32(__x); }
+
+__DEVICE__
+float tanf(float __x) { return __ocml_tan_f32(__x); }
+
+__DEVICE__
+float tanhf(float __x) { return __ocml_tanh_f32(__x); }
+
+__DEVICE__
+float tgammaf(float __x) { return __ocml_tgamma_f32(__x); }
+
+__DEVICE__
+float truncf(float __x) { return __ocml_trunc_f32(__x); }
+
+__DEVICE__
+float y0f(float __x) { return __ocml_y0_f32(__x); }
+
+__DEVICE__
+float y1f(float __x) { return __ocml_y1_f32(__x); }
+
+__DEVICE__
+float ynf(int __n, float __x) {
+  // TODO: we could use Ahmes multiplication and the Miller & Brown algorithm
+  // for linear recurrences to get O(log n) steps, but it's unclear if it'd be
+  // beneficial in this case. Placeholder until OCML adds support.
+  if (__n == 0)
+    return y0f(__x);
+  if (__n == 1)
+    return y1f(__x);
+
+  float __x0 = y0f(__x);
+  float __x1 = y1f(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    float __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+
+  return __x1;
+}
+
+// BEGIN INTRINSICS
+
+__DEVICE__
+float __cosf(float __x) { return __ocml_native_cos_f32(__x); }
+
+__DEVICE__
+float __exp10f(float __x) { return __ocml_native_exp10_f32(__x); }
+
+__DEVICE__
+float __expf(float __x) { return __ocml_native_exp_f32(__x); }
+
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fadd_rd(float __x, float __y) { return __ocml_add_rtn_f32(__x, __y); }
+#endif
+__DEVICE__
+float __fadd_rn(float __x, float __y) { return __ocml_add_rte_f32(__x, __y); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fadd_ru(float __x, float __y) { return __ocml_add_rtp_f32(__x, __y); }
+
+__DEVICE__
+float __fadd_rz(float __x, float __y) { return __ocml_add_rtz_f32(__x, __y); }
+
+__DEVICE__
+float __fdiv_rd(float __x, float __y) { return __ocml_div_rtn_f32(__x, __y); }
+#endif
+__DEVICE__
+float __fdiv_rn(float __x, float __y) { return __ocml_div_rte_f32(__x, __y); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fdiv_ru(float __x, float __y) { return __ocml_div_rtp_f32(__x, __y); }
+
+__DEVICE__
+float __fdiv_rz(float __x, float __y) { return __ocml_div_rtz_f32(__x, __y); }
+#endif
+__DEVICE__
+float __fdividef(float __x, float __y) { return __x / __y; }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fmaf_rd(float __x, float __y, float __z) {
+  return __ocml_fma_rtn_f32(__x, __y, __z);
+}
+#endif
+__DEVICE__
+float __fmaf_rn(float __x, float __y, float __z) {
+  return __ocml_fma_rte_f32(__x, __y, __z);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fmaf_ru(float __x, float __y, float __z) {
+  return __ocml_fma_rtp_f32(__x, __y, __z);
+}
+
+__DEVICE__
+float __fmaf_rz(float __x, float __y, float __z) {
+  return __ocml_fma_rtz_f32(__x, __y, __z);
+}
+
+__DEVICE__
+float __fmul_rd(float __x, float __y) { return __ocml_mul_rtn_f32(__x, __y); }
+#endif
+__DEVICE__
+float __fmul_rn(float __x, float __y) { return __ocml_mul_rte_f32(__x, __y); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fmul_ru(float __x, float __y) { return __ocml_mul_rtp_f32(__x, __y); }
+
+__DEVICE__
+float __fmul_rz(float __x, float __y) { return __ocml_mul_rtz_f32(__x, __y); }
+
+__DEVICE__
+float __frcp_rd(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#endif
+__DEVICE__
+float __frcp_rn(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __frcp_ru(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+
+__DEVICE__
+float __frcp_rz(float __x) { return __llvm_amdgcn_rcp_f32(__x); }
+#endif
+__DEVICE__
+float __frsqrt_rn(float __x) { return __llvm_amdgcn_rsq_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fsqrt_rd(float __x) { return __ocml_sqrt_rtn_f32(__x); }
+#endif
+__DEVICE__
+float __fsqrt_rn(float __x) { return __ocml_sqrt_rte_f32(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fsqrt_ru(float __x) { return __ocml_sqrt_rtp_f32(__x); }
+
+__DEVICE__
+float __fsqrt_rz(float __x) { return __ocml_sqrt_rtz_f32(__x); }
+
+__DEVICE__
+float __fsub_rd(float __x, float __y) { return __ocml_sub_rtn_f32(__x, __y); }
+#endif
+__DEVICE__
+float __fsub_rn(float __x, float __y) { return __ocml_sub_rte_f32(__x, __y); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+float __fsub_ru(float __x, float __y) { return __ocml_sub_rtp_f32(__x, __y); }
+
+__DEVICE__
+float __fsub_rz(float __x, float __y) { return __ocml_sub_rtz_f32(__x, __y); }
+#endif
+__DEVICE__
+float __log10f(float __x) { return __ocml_native_log10_f32(__x); }
+
+__DEVICE__
+float __log2f(float __x) { return __ocml_native_log2_f32(__x); }
+
+__DEVICE__
+float __logf(float __x) { return __ocml_native_log_f32(__x); }
+
+__DEVICE__
+float __powf(float __x, float __y) { return __ocml_pow_f32(__x, __y); }
+
+__DEVICE__
+float __saturatef(float __x) { return (__x < 0) ? 0 : ((__x > 1) ? 1 : __x); }
+
+__DEVICE__
+void __sincosf(float __x, float *__sinptr, float *__cosptr) {
+  *__sinptr = __ocml_native_sin_f32(__x);
+  *__cosptr = __ocml_native_cos_f32(__x);
+}
+
+__DEVICE__
+float __sinf(float __x) { return __ocml_native_sin_f32(__x); }
+
+__DEVICE__
+float __tanf(float __x) { return __ocml_tan_f32(__x); }
+// END INTRINSICS
+// END FLOAT
+
+// BEGIN DOUBLE
+__DEVICE__
+double acos(double __x) { return __ocml_acos_f64(__x); }
+
+__DEVICE__
+double acosh(double __x) { return __ocml_acosh_f64(__x); }
+
+__DEVICE__
+double asin(double __x) { return __ocml_asin_f64(__x); }
+
+__DEVICE__
+double asinh(double __x) { return __ocml_asinh_f64(__x); }
+
+__DEVICE__
+double atan(double __x) { return __ocml_atan_f64(__x); }
+
+__DEVICE__
+double atan2(double __x, double __y) { return __ocml_atan2_f64(__x, __y); }
+
+__DEVICE__
+double atanh(double __x) { return __ocml_atanh_f64(__x); }
+
+__DEVICE__
+double cbrt(double __x) { return __ocml_cbrt_f64(__x); }
+
+__DEVICE__
+double ceil(double __x) { return __ocml_ceil_f64(__x); }
+
+__DEVICE__
+double copysign(double __x, double __y) {
+  return __ocml_copysign_f64(__x, __y);
+}
+
+__DEVICE__
+double cos(double __x) { return __ocml_cos_f64(__x); }
+
+__DEVICE__
+double cosh(double __x) { return __ocml_cosh_f64(__x); }
+
+__DEVICE__
+double cospi(double __x) { return __ocml_cospi_f64(__x); }
+
+__DEVICE__
+double cyl_bessel_i0(double __x) { return __ocml_i0_f64(__x); }
+
+__DEVICE__
+double cyl_bessel_i1(double __x) { return __ocml_i1_f64(__x); }
+
+__DEVICE__
+double erf(double __x) { return __ocml_erf_f64(__x); }
+
+__DEVICE__
+double erfc(double __x) { return __ocml_erfc_f64(__x); }
+
+__DEVICE__
+double erfcinv(double __x) { return __ocml_erfcinv_f64(__x); }
+
+__DEVICE__
+double erfcx(double __x) { return __ocml_erfcx_f64(__x); }
+
+__DEVICE__
+double erfinv(double __x) { return __ocml_erfinv_f64(__x); }
+
+__DEVICE__
+double exp(double __x) { return __ocml_exp_f64(__x); }
+
+__DEVICE__
+double exp10(double __x) { return __ocml_exp10_f64(__x); }
+
+__DEVICE__
+double exp2(double __x) { return __ocml_exp2_f64(__x); }
+
+__DEVICE__
+double expm1(double __x) { return __ocml_expm1_f64(__x); }
+
+__DEVICE__
+double fabs(double __x) { return __ocml_fabs_f64(__x); }
+
+__DEVICE__
+double fdim(double __x, double __y) { return __ocml_fdim_f64(__x, __y); }
+
+__DEVICE__
+double floor(double __x) { return __ocml_floor_f64(__x); }
+
+__DEVICE__
+double fma(double __x, double __y, double __z) {
+  return __ocml_fma_f64(__x, __y, __z);
+}
+
+__DEVICE__
+double fmax(double __x, double __y) { return __ocml_fmax_f64(__x, __y); }
+
+__DEVICE__
+double fmin(double __x, double __y) { return __ocml_fmin_f64(__x, __y); }
+
+__DEVICE__
+double fmod(double __x, double __y) { return __ocml_fmod_f64(__x, __y); }
+
+__DEVICE__
+double frexp(double __x, int *__nptr) {
+  int __tmp;
+  double __r =
+      __ocml_frexp_f64(__x, (__attribute__((address_space(5))) int *)&__tmp);
+  *__nptr = __tmp;
+  return __r;
+}
+
+__DEVICE__
+double hypot(double __x, double __y) { return __ocml_hypot_f64(__x, __y); }
+
+__DEVICE__
+int ilogb(double __x) { return __ocml_ilogb_f64(__x); }
+
+__DEVICE__
+__RETURN_TYPE __finite(double __x) { return __ocml_isfinite_f64(__x); }
+
+__DEVICE__
+__RETURN_TYPE __isinf(double __x) { return __ocml_isinf_f64(__x); }
+
+__DEVICE__
+__RETURN_TYPE __isnan(double __x) { return __ocml_isnan_f64(__x); }
+
+__DEVICE__
+double j0(double __x) { return __ocml_j0_f64(__x); }
+
+__DEVICE__
+double j1(double __x) { return __ocml_j1_f64(__x); }
+
+__DEVICE__
+// TODO: we could use Ahmes multiplication and the Miller & Brown algorithm
+// for linear recurrences to get O(log n) steps, but it's unclear if it'd be
+// beneficial in this case. Placeholder until OCML adds support.
+double jn(int __n, double __x) {
+  if (__n == 0)
+    return j0(__x);
+  if (__n == 1)
+    return j1(__x);
+
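+  // Forward recurrence J_{n+1}(x) = (2n/x) * J_n(x) - J_{n-1}(x), seeded
+  // with J_0 and J_1.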
+  double __x0 = j0(__x);
+  double __x1 = j1(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    double __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+  return __x1;
+}
+
+__DEVICE__
+double ldexp(double __x, int __e) { return __ocml_ldexp_f64(__x, __e); }
+
+__DEVICE__
+double lgamma(double __x) { return __ocml_lgamma_f64(__x); }
+
+__DEVICE__
+long long int llrint(double __x) { return __ocml_rint_f64(__x); }
+
+__DEVICE__
+long long int llround(double __x) { return __ocml_round_f64(__x); }
+
+__DEVICE__
+double log(double __x) { return __ocml_log_f64(__x); }
+
+__DEVICE__
+double log10(double __x) { return __ocml_log10_f64(__x); }
+
+__DEVICE__
+double log1p(double __x) { return __ocml_log1p_f64(__x); }
+
+__DEVICE__
+double log2(double __x) { return __ocml_log2_f64(__x); }
+
+__DEVICE__
+double logb(double __x) { return __ocml_logb_f64(__x); }
+
+__DEVICE__
+long int lrint(double __x) { return __ocml_rint_f64(__x); }
+
+__DEVICE__
+long int lround(double __x) { return __ocml_round_f64(__x); }
+
+__DEVICE__
+double modf(double __x, double *__iptr) {
+  double __tmp;
+  double __r =
+      __ocml_modf_f64(__x, (__attribute__((address_space(5))) double *)&__tmp);
+  *__iptr = __tmp;
+
+  return __r;
+}
+
+__DEVICE__
+double nan(const char *__tagp) {
+#if !_WIN32
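+  // IEEE-754 binary64: 1 sign bit, 11 exponent bits, 52 mantissa bits. A
+  // quiet NaN has an all-ones exponent and the top mantissa bit set; the
+  // remaining mantissa bits carry the payload parsed from __tagp.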
+  union {
+    double val;
+    struct ieee_double {
+      uint64_t mantissa : 51;
+      uint32_t quiet : 1;
+      uint32_t exponent : 11;
+      uint32_t sign : 1;
+    } bits;
+  } __tmp;
+  __static_assert_type_size_equal(sizeof(__tmp.val), sizeof(__tmp.bits));
+
+  __tmp.bits.sign = 0u;
+  __tmp.bits.exponent = ~0u;
+  __tmp.bits.quiet = 1u;
+  __tmp.bits.mantissa = __make_mantissa(__tagp);
+
+  return __tmp.val;
+#else
+  __static_assert_type_size_equal(sizeof(uint64_t), sizeof(double));
+  uint64_t __val = __make_mantissa(__tagp);
+  __val |= 0xFFFull << 51;
+  return *reinterpret_cast<double *>(&__val);
+#endif
+}
+
+__DEVICE__
+double nearbyint(double __x) { return __ocml_nearbyint_f64(__x); }
+
+__DEVICE__
+double nextafter(double __x, double __y) {
+  return __ocml_nextafter_f64(__x, __y);
+}
+
+__DEVICE__
+double norm(int __dim,
+            const double *__a) { // TODO: placeholder until OCML adds support.
+  double __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_sqrt_f64(__r);
+}
+
+__DEVICE__
+double norm3d(double __x, double __y, double __z) {
+  return __ocml_len3_f64(__x, __y, __z);
+}
+
+__DEVICE__
+double norm4d(double __x, double __y, double __z, double __w) {
+  return __ocml_len4_f64(__x, __y, __z, __w);
+}
+
+__DEVICE__
+double normcdf(double __x) { return __ocml_ncdf_f64(__x); }
+
+__DEVICE__
+double normcdfinv(double __x) { return __ocml_ncdfinv_f64(__x); }
+
+__DEVICE__
+double pow(double __x, double __y) { return __ocml_pow_f64(__x, __y); }
+
+__DEVICE__
+double powi(double __x, int __y) { return __ocml_pown_f64(__x, __y); }
+
+__DEVICE__
+double rcbrt(double __x) { return __ocml_rcbrt_f64(__x); }
+
+__DEVICE__
+double remainder(double __x, double __y) {
+  return __ocml_remainder_f64(__x, __y);
+}
+
+__DEVICE__
+double remquo(double __x, double __y, int *__quo) {
+  int __tmp;
+  double __r = __ocml_remquo_f64(
+      __x, __y, (__attribute__((address_space(5))) int *)&__tmp);
+  *__quo = __tmp;
+
+  return __r;
+}
+
+__DEVICE__
+double rhypot(double __x, double __y) { return __ocml_rhypot_f64(__x, __y); }
+
+__DEVICE__
+double rint(double __x) { return __ocml_rint_f64(__x); }
+
+__DEVICE__
+double rnorm(int __dim,
+             const double *__a) { // TODO: placeholder until OCML adds support.
+  double __r = 0;
+  while (__dim--) {
+    __r += __a[0] * __a[0];
+    ++__a;
+  }
+
+  return __ocml_rsqrt_f64(__r);
+}
+
+__DEVICE__
+double rnorm3d(double __x, double __y, double __z) {
+  return __ocml_rlen3_f64(__x, __y, __z);
+}
+
+__DEVICE__
+double rnorm4d(double __x, double __y, double __z, double __w) {
+  return __ocml_rlen4_f64(__x, __y, __z, __w);
+}
+
+__DEVICE__
+double round(double __x) { return __ocml_round_f64(__x); }
+
+__DEVICE__
+double rsqrt(double __x) { return __ocml_rsqrt_f64(__x); }
+
+__DEVICE__
+double scalbln(double __x, long int __n) {
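+  // __ocml_scalbn_f64 takes an int exponent; when __n does not fit in an
+  // int, fall back to __ocml_scalb_f64, which is not limited to int range.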
+  return (__n < INT_MAX) ? __ocml_scalbn_f64(__x, __n)
+                         : __ocml_scalb_f64(__x, __n);
+}
+__DEVICE__
+double scalbn(double __x, int __n) { return __ocml_scalbn_f64(__x, __n); }
+
+__DEVICE__
+__RETURN_TYPE __signbit(double __x) { return __ocml_signbit_f64(__x); }
+
+__DEVICE__
+double sin(double __x) { return __ocml_sin_f64(__x); }
+
+__DEVICE__
+void sincos(double __x, double *__sinptr, double *__cosptr) {
+  double __tmp;
+  *__sinptr = __ocml_sincos_f64(
+      __x, (__attribute__((address_space(5))) double *)&__tmp);
+  *__cosptr = __tmp;
+}
+
+__DEVICE__
+void sincospi(double __x, double *__sinptr, double *__cosptr) {
+  double __tmp;
+  *__sinptr = __ocml_sincospi_f64(
+      __x, (__attribute__((address_space(5))) double *)&__tmp);
+  *__cosptr = __tmp;
+}
+
+__DEVICE__
+double sinh(double __x) { return __ocml_sinh_f64(__x); }
+
+__DEVICE__
+double sinpi(double __x) { return __ocml_sinpi_f64(__x); }
+
+__DEVICE__
+double sqrt(double __x) { return __ocml_sqrt_f64(__x); }
+
+__DEVICE__
+double tan(double __x) { return __ocml_tan_f64(__x); }
+
+__DEVICE__
+double tanh(double __x) { return __ocml_tanh_f64(__x); }
+
+__DEVICE__
+double tgamma(double __x) { return __ocml_tgamma_f64(__x); }
+
+__DEVICE__
+double trunc(double __x) { return __ocml_trunc_f64(__x); }
+
+__DEVICE__
+double y0(double __x) { return __ocml_y0_f64(__x); }
+
+__DEVICE__
+double y1(double __x) { return __ocml_y1_f64(__x); }
+
+__DEVICE__
+// TODO: we could use Ahmes multiplication and the Miller & Brown algorithm
+// for linear recurrences to get O(log n) steps, but it's unclear if it'd be
+// beneficial in this case. Placeholder until OCML adds support.
+double yn(int __n, double __x) {
+  if (__n == 0)
+    return y0(__x);
+  if (__n == 1)
+    return y1(__x);
+
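+  // Forward recurrence Y_{n+1}(x) = (2n/x) * Y_n(x) - Y_{n-1}(x), seeded
+  // with Y_0 and Y_1.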
+  double __x0 = y0(__x);
+  double __x1 = y1(__x);
+  for (int __i = 1; __i < __n; ++__i) {
+    double __x2 = (2 * __i) / __x * __x1 - __x0;
+    __x0 = __x1;
+    __x1 = __x2;
+  }
+
+  return __x1;
+}
+
+// BEGIN INTRINSICS
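+// Rounding-mode suffixes follow the CUDA convention: _rn rounds to nearest
+// even (OCML "rte"), _rz toward zero ("rtz"), _rd toward -infinity ("rtn"),
+// and _ru toward +infinity ("rtp"). Only the _rn variants are available
+// unless OCML_BASIC_ROUNDED_OPERATIONS is defined.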
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __dadd_rd(double __x, double __y) {
+  return __ocml_add_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+double __dadd_rn(double __x, double __y) {
+  return __ocml_add_rte_f64(__x, __y);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __dadd_ru(double __x, double __y) {
+  return __ocml_add_rtp_f64(__x, __y);
+}
+
+__DEVICE__
+double __dadd_rz(double __x, double __y) {
+  return __ocml_add_rtz_f64(__x, __y);
+}
+
+__DEVICE__
+double __ddiv_rd(double __x, double __y) {
+  return __ocml_div_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+double __ddiv_rn(double __x, double __y) {
+  return __ocml_div_rte_f64(__x, __y);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __ddiv_ru(double __x, double __y) {
+  return __ocml_div_rtp_f64(__x, __y);
+}
+
+__DEVICE__
+double __ddiv_rz(double __x, double __y) {
+  return __ocml_div_rtz_f64(__x, __y);
+}
+
+__DEVICE__
+double __dmul_rd(double __x, double __y) {
+  return __ocml_mul_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+double __dmul_rn(double __x, double __y) {
+  return __ocml_mul_rte_f64(__x, __y);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __dmul_ru(double __x, double __y) {
+  return __ocml_mul_rtp_f64(__x, __y);
+}
+
+__DEVICE__
+double __dmul_rz(double __x, double __y) {
+  return __ocml_mul_rtz_f64(__x, __y);
+}
+
+__DEVICE__
+double __drcp_rd(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+#endif
+__DEVICE__
+double __drcp_rn(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __drcp_ru(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+
+__DEVICE__
+double __drcp_rz(double __x) { return __llvm_amdgcn_rcp_f64(__x); }
+
+__DEVICE__
+double __dsqrt_rd(double __x) { return __ocml_sqrt_rtn_f64(__x); }
+#endif
+__DEVICE__
+double __dsqrt_rn(double __x) { return __ocml_sqrt_rte_f64(__x); }
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __dsqrt_ru(double __x) { return __ocml_sqrt_rtp_f64(__x); }
+
+__DEVICE__
+double __dsqrt_rz(double __x) { return __ocml_sqrt_rtz_f64(__x); }
+
+__DEVICE__
+double __dsub_rd(double __x, double __y) {
+  return __ocml_sub_rtn_f64(__x, __y);
+}
+#endif
+__DEVICE__
+double __dsub_rn(double __x, double __y) {
+  return __ocml_sub_rte_f64(__x, __y);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __dsub_ru(double __x, double __y) {
+  return __ocml_sub_rtp_f64(__x, __y);
+}
+
+__DEVICE__
+double __dsub_rz(double __x, double __y) {
+  return __ocml_sub_rtz_f64(__x, __y);
+}
+
+__DEVICE__
+double __fma_rd(double __x, double __y, double __z) {
+  return __ocml_fma_rtn_f64(__x, __y, __z);
+}
+#endif
+__DEVICE__
+double __fma_rn(double __x, double __y, double __z) {
+  return __ocml_fma_rte_f64(__x, __y, __z);
+}
+#if defined OCML_BASIC_ROUNDED_OPERATIONS
+__DEVICE__
+double __fma_ru(double __x, double __y, double __z) {
+  return __ocml_fma_rtp_f64(__x, __y, __z);
+}
+
+__DEVICE__
+double __fma_rz(double __x, double __y, double __z) {
+  return __ocml_fma_rtz_f64(__x, __y, __z);
+}
+#endif
+// END INTRINSICS
+// END DOUBLE
+
+// C only macros
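+// In C11 and later, _Generic dispatches on the argument type so that each
+// classification macro below selects the float or double implementation.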
+#if !defined(__cplusplus) && __STDC_VERSION__ >= 201112L
+#define isfinite(__x) _Generic((__x), float : __finitef, double : __finite)(__x)
+#define isinf(__x) _Generic((__x), float : __isinff, double : __isinf)(__x)
+#define isnan(__x) _Generic((__x), float : __isnanf, double : __isnan)(__x)
+#define signbit(__x)                                                           \
+  _Generic((__x), float : __signbitf, double : __signbit)(__x)
+#endif // !defined(__cplusplus) && __STDC_VERSION__ >= 201112L
+
+#if defined(__cplusplus)
+template <class T> __DEVICE__ T min(T __arg1, T __arg2) {
+  return (__arg1 < __arg2) ? __arg1 : __arg2;
+}
+
+template <class T> __DEVICE__ T max(T __arg1, T __arg2) {
+  return (__arg1 > __arg2) ? __arg1 : __arg2;
+}
+
+__DEVICE__ int min(int __arg1, int __arg2) {
+  return (__arg1 < __arg2) ? __arg1 : __arg2;
+}
+__DEVICE__ int max(int __arg1, int __arg2) {
+  return (__arg1 > __arg2) ? __arg1 : __arg2;
+}
+
+__DEVICE__
+float max(float __x, float __y) { return fmaxf(__x, __y); }
+
+__DEVICE__
+double max(double __x, double __y) { return fmax(__x, __y); }
+
+__DEVICE__
+float min(float __x, float __y) { return fminf(__x, __y); }
+
+__DEVICE__
+double min(double __x, double __y) { return fmin(__x, __y); }
+
+__host__ inline static int min(int __arg1, int __arg2) {
+  return std::min(__arg1, __arg2);
+}
+
+__host__ inline static int max(int __arg1, int __arg2) {
+  return std::max(__arg1, __arg2);
+}
+#endif
+
+#pragma pop_macro("__DEVICE__")
+#pragma pop_macro("__RETURN_TYPE")
+
+#endif // __CLANG_HIP_MATH_H__
diff --git a/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_runtime_wrapper.h b/linux-x86/lib64/clang/12.0.1/include/__clang_hip_runtime_wrapper.h
similarity index 93%
copy from darwin-x86/lib64/clang/11.0.5/include/__clang_hip_runtime_wrapper.h
copy to linux-x86/lib64/clang/12.0.1/include/__clang_hip_runtime_wrapper.h
index addae56..81a16a2 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/__clang_hip_runtime_wrapper.h
+++ b/linux-x86/lib64/clang/12.0.1/include/__clang_hip_runtime_wrapper.h
@@ -28,6 +28,10 @@
 #define __shared__ __attribute__((shared))
 #define __constant__ __attribute__((constant))
 
+#if !defined(__cplusplus) || __cplusplus < 201103L
+  #define nullptr NULL
+#endif
+
 #if __HIP_ENABLE_DEVICE_MALLOC__
 extern "C" __device__ void *__hip_malloc(size_t __size);
 extern "C" __device__ void *__hip_free(void *__ptr);
@@ -51,6 +55,7 @@
 
 #if !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
 #include <__clang_cuda_math_forward_declares.h>
+#include <__clang_hip_cmath.h>
 #include <__clang_cuda_complex_builtins.h>
 
 #include <algorithm>
diff --git a/linux-x86/lib64/clang/11.0.5/include/__stddef_max_align_t.h b/linux-x86/lib64/clang/12.0.1/include/__stddef_max_align_t.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/__stddef_max_align_t.h
rename to linux-x86/lib64/clang/12.0.1/include/__stddef_max_align_t.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/__wmmintrin_aes.h b/linux-x86/lib64/clang/12.0.1/include/__wmmintrin_aes.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/__wmmintrin_aes.h
rename to linux-x86/lib64/clang/12.0.1/include/__wmmintrin_aes.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/__wmmintrin_pclmul.h b/linux-x86/lib64/clang/12.0.1/include/__wmmintrin_pclmul.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/__wmmintrin_pclmul.h
rename to linux-x86/lib64/clang/12.0.1/include/__wmmintrin_pclmul.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/adxintrin.h b/linux-x86/lib64/clang/12.0.1/include/adxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/adxintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/adxintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/altivec.h b/linux-x86/lib64/clang/12.0.1/include/altivec.h
similarity index 94%
copy from darwin-x86/lib64/clang/11.0.5/include/altivec.h
copy to linux-x86/lib64/clang/12.0.1/include/altivec.h
index 9a40092..1d7bc20 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/altivec.h
+++ b/linux-x86/lib64/clang/12.0.1/include/altivec.h
@@ -1709,6 +1709,20 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpeq(vector signed __int128 __a, vector signed __int128 __b) {
+  return (vector bool __int128)__builtin_altivec_vcmpequq(
+      (vector bool __int128)__a, (vector bool __int128)__b);
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpeq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return (vector bool __int128)__builtin_altivec_vcmpequq(
+      (vector bool __int128)__a, (vector bool __int128)__b);
+}
+#endif
+
 #ifdef __POWER9_VECTOR__
 /* vec_cmpne */
 
@@ -1766,36 +1780,26 @@
                                                     (vector int)__b);
 }
 
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector bool long long __a, vector bool long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector signed long long __a, vector signed long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
-}
-
 static __inline__ vector bool int __ATTRS_o_ai
 vec_cmpne(vector float __a, vector float __b) {
   return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
                                                     (vector int)__b);
 }
 
-static __inline__ vector bool long long __ATTRS_o_ai
-vec_cmpne(vector double __a, vector double __b) {
-  return (vector bool long long)
-    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpne(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
+      (vector bool __int128)__a, (vector bool __int128)__b));
 }
 
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpne(vector signed __int128 __a, vector signed __int128 __b) {
+  return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
+      (vector bool __int128)__a, (vector bool __int128)__b));
+}
+#endif
+
 /* vec_cmpnez */
 
 static __inline__ vector bool char __ATTRS_o_ai
@@ -1900,6 +1904,86 @@
   return __builtin_altivec_vprtybd(__a);
 }
 
+#else
+/* vec_cmpne */
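+// Pre-POWER9 fallback: there is no native compare-not-equal, so synthesize
+// it as the bitwise complement of vec_cmpeq.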
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpne(vector bool char __a, vector bool char __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpne(vector signed char __a, vector signed char __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpne(vector unsigned char __a, vector unsigned char __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpne(vector bool short __a, vector bool short __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpne(vector signed short __a, vector signed short __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpne(vector unsigned short __a, vector unsigned short __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector bool int __a, vector bool int __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector signed int __a, vector signed int __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector unsigned int __a, vector unsigned int __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector float __a, vector float __b) {
+  return ~(vec_cmpeq(__a, __b));
+}
+#endif
+
+#ifdef __POWER8_VECTOR__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector bool long long __a, vector bool long long __b) {
+  return (vector bool long long)
+    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector signed long long __a, vector signed long long __b) {
+  return (vector bool long long)
+    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) {
+  return (vector bool long long)
+    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
+#endif
+
+#ifdef __VSX__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector double __a, vector double __b) {
+  return (vector bool long long)
+    ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
 #endif
 
 /* vec_cmpgt */
@@ -1962,6 +2046,20 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpgt(vector signed __int128 __a, vector signed __int128 __b) {
+  return (vector bool __int128)__builtin_altivec_vcmpgtsq(
+      (vector bool __int128)__a, (vector bool __int128)__b);
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpgt(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return (vector bool __int128)__builtin_altivec_vcmpgtuq(
+      (vector bool __int128)__a, (vector bool __int128)__b);
+}
+#endif
+
 /* vec_cmpge */
 
 static __inline__ vector bool char __ATTRS_o_ai
@@ -2022,6 +2120,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpge(vector signed __int128 __a, vector signed __int128 __b) {
+  return ~(vec_cmpgt(__b, __a));
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmpge(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return ~(vec_cmpgt(__b, __a));
+}
+#endif
+
 /* vec_vcmpgefp */
 
 static __inline__ vector bool int __attribute__((__always_inline__))
@@ -2134,6 +2244,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmple(vector signed __int128 __a, vector signed __int128 __b) {
+  return vec_cmpge(__b, __a);
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmple(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return vec_cmpge(__b, __a);
+}
+#endif
+
 /* vec_cmplt */
 
 static __inline__ vector bool char __ATTRS_o_ai
@@ -2178,6 +2300,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmplt(vector signed __int128 __a, vector signed __int128 __b) {
+  return vec_cmpgt(__b, __a);
+}
+
+static __inline__ vector bool __int128 __ATTRS_o_ai
+vec_cmplt(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return vec_cmpgt(__b, __a);
+}
+#endif
+
 #ifdef __POWER8_VECTOR__
 static __inline__ vector bool long long __ATTRS_o_ai
 vec_cmplt(vector signed long long __a, vector signed long long __b) {
@@ -2702,67 +2836,67 @@
 }
 
 #if defined(__powerpc64__)
-static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(signed char *__a,
+static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(const signed char *__a,
                                                              size_t __b) {
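+  // lxvl reads the byte count from the top byte (bits 0:7) of the length
+  // register, hence the shift left by 56.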
   return (vector signed char)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_len(unsigned char *__a, size_t __b) {
+vec_xl_len(const unsigned char *__a, size_t __b) {
   return (vector unsigned char)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
-static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(signed short *__a,
+static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(const signed short *__a,
                                                               size_t __b) {
   return (vector signed short)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xl_len(unsigned short *__a, size_t __b) {
+vec_xl_len(const unsigned short *__a, size_t __b) {
   return (vector unsigned short)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
-static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(signed int *__a,
+static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(const signed int *__a,
                                                             size_t __b) {
   return (vector signed int)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
-static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(unsigned int *__a,
+static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(const unsigned int *__a,
                                                               size_t __b) {
   return (vector unsigned int)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
-static __inline__ vector float __ATTRS_o_ai vec_xl_len(float *__a, size_t __b) {
+static __inline__ vector float __ATTRS_o_ai vec_xl_len(const float *__a, size_t __b) {
   return (vector float)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
 static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_xl_len(signed __int128 *__a, size_t __b) {
+vec_xl_len(const signed __int128 *__a, size_t __b) {
   return (vector signed __int128)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_len(unsigned __int128 *__a, size_t __b) {
+vec_xl_len(const unsigned __int128 *__a, size_t __b) {
   return (vector unsigned __int128)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
 static __inline__ vector signed long long __ATTRS_o_ai
-vec_xl_len(signed long long *__a, size_t __b) {
+vec_xl_len(const signed long long *__a, size_t __b) {
   return (vector signed long long)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xl_len(unsigned long long *__a, size_t __b) {
+vec_xl_len(const unsigned long long *__a, size_t __b) {
   return (vector unsigned long long)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
-static __inline__ vector double __ATTRS_o_ai vec_xl_len(double *__a,
+static __inline__ vector double __ATTRS_o_ai vec_xl_len(const double *__a,
                                                         size_t __b) {
   return (vector double)__builtin_vsx_lxvl(__a, (__b << 56));
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_len_r(unsigned char *__a, size_t __b) {
+vec_xl_len_r(const unsigned char *__a, size_t __b) {
   vector unsigned char __res =
       (vector unsigned char)__builtin_vsx_lxvll(__a, (__b << 56));
 #ifdef __LITTLE_ENDIAN__
@@ -2951,6 +3085,42 @@
 
 #define vec_vctuxs __builtin_altivec_vctuxs
 
+/* vec_signext */
+
+#ifdef __POWER9_VECTOR__
+static __inline__ vector signed int __ATTRS_o_ai
+vec_signexti(vector signed char __a) {
+  return __builtin_altivec_vextsb2w(__a);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_signexti(vector signed short __a) {
+  return __builtin_altivec_vextsh2w(__a);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_signextll(vector signed char __a) {
+  return __builtin_altivec_vextsb2d(__a);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_signextll(vector signed short __a) {
+  return __builtin_altivec_vextsh2d(__a);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_signextll(vector signed int __a) {
+  return __builtin_altivec_vextsw2d(__a);
+}
+#endif
+
+#ifdef __POWER10_VECTOR__
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_signextq(vector signed long long __a) {
+  return __builtin_altivec_vextsd2q(__a);
+}
+#endif
+
 /* vec_signed */
 
 static __inline__ vector signed int __ATTRS_o_ai
@@ -3288,6 +3458,66 @@
 }
 #endif
 
+/* vec_dive */
+
+#ifdef __POWER10_VECTOR__
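+// Extended divide: each element computes (__a * 2^N) / __b, where N is the
+// element width in bits (the Power10 vdives*/vdiveu* instructions).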
+static __inline__ vector signed int __ATTRS_o_ai
+vec_dive(vector signed int __a, vector signed int __b) {
+  return __builtin_altivec_vdivesw(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_dive(vector unsigned int __a, vector unsigned int __b) {
+  return __builtin_altivec_vdiveuw(__a, __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_dive(vector signed long long __a, vector signed long long __b) {
+  return __builtin_altivec_vdivesd(__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_dive(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vdiveud(__a, __b);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_dive(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __builtin_altivec_vdiveuq(__a, __b);
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_dive(vector signed __int128 __a, vector signed __int128 __b) {
+  return __builtin_altivec_vdivesq(__a, __b);
+}
+#endif
+
+#ifdef __POWER10_VECTOR__
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_div(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __a / __b;
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_div(vector signed __int128 __a, vector signed __int128 __b) {
+  return __a / __b;
+}
+#endif // __POWER10_VECTOR__
+
+/* vec_xvtdiv */
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_test_swdiv(vector double __a,
+                                                  vector double __b) {
+  return __builtin_vsx_xvtdivdp(__a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_test_swdivs(vector float __a,
+                                                   vector float __b) {
+  return __builtin_vsx_xvtdivsp(__a, __b);
+}
+#endif
+
 /* vec_dss */
 
 #define vec_dss __builtin_altivec_dss
@@ -3300,23 +3530,19 @@
 
 /* vec_dst */
 #define vec_dst(__PTR, __CW, __STR) \
-  __extension__(                    \
-      { __builtin_altivec_dst((const void *)(__PTR), (__CW), (__STR)); })
+  __builtin_altivec_dst((const void *)(__PTR), (__CW), (__STR))
 
 /* vec_dstst */
 #define vec_dstst(__PTR, __CW, __STR) \
-  __extension__(                      \
-      { __builtin_altivec_dstst((const void *)(__PTR), (__CW), (__STR)); })
+  __builtin_altivec_dstst((const void *)(__PTR), (__CW), (__STR))
 
 /* vec_dststt */
 #define vec_dststt(__PTR, __CW, __STR) \
-  __extension__(                       \
-      { __builtin_altivec_dststt((const void *)(__PTR), (__CW), (__STR)); })
+  __builtin_altivec_dststt((const void *)(__PTR), (__CW), (__STR))
 
 /* vec_dstt */
 #define vec_dstt(__PTR, __CW, __STR) \
-  __extension__(                     \
-      { __builtin_altivec_dstt((const void *)(__PTR), (__CW), (__STR)); })
+  __builtin_altivec_dstt((const void *)(__PTR), (__CW), (__STR))
 
 /* vec_eqv */
 
@@ -5467,6 +5693,16 @@
   return __builtin_altivec_vmsumuhm(__a, __b, __c);
 }
 
+/* vec_msumc */
+
+#ifdef __POWER10_VECTOR__
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_msumc(vector unsigned long long __a, vector unsigned long long __b,
+          vector unsigned __int128 __c) {
+  return __builtin_altivec_vmsumcud(__a, __b, __c);
+}
+#endif
+
 /* vec_vmsummbm */
 
 static __inline__ vector int __attribute__((__always_inline__))
@@ -5693,6 +5929,26 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
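+// Even/odd element numbering is reversed on little-endian targets, so
+// vec_mule maps to the "odd" builtin there (and vec_mulo below to the
+// "even" one).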
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_mule(vector signed long long __a, vector signed long long __b) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vmulosd(__a, __b);
+#else
+  return __builtin_altivec_vmulesd(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_mule(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vmuloud(__a, __b);
+#else
+  return __builtin_altivec_vmuleud(__a, __b);
+#endif
+}
+#endif
+
 /* vec_vmulesb */
 
 static __inline__ vector short __attribute__((__always_inline__))
@@ -5737,6 +5993,30 @@
 #endif
 }
 
+/* vec_mulh */
+
+#ifdef __POWER10_VECTOR__
+static __inline__ vector signed int __ATTRS_o_ai
+vec_mulh(vector signed int __a, vector signed int __b) {
+  return __builtin_altivec_vmulhsw(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mulh(vector unsigned int __a, vector unsigned int __b) {
+  return __builtin_altivec_vmulhuw(__a, __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mulh(vector signed long long __a, vector signed long long __b) {
+  return __builtin_altivec_vmulhsd(__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mulh(vector unsigned long long __a, vector unsigned long long __b) {
+  return __builtin_altivec_vmulhud(__a, __b);
+}
+#endif
+
 /* vec_mulo */
 
 static __inline__ vector short __ATTRS_o_ai vec_mulo(vector signed char __a,
@@ -5795,6 +6075,26 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_mulo(vector signed long long __a, vector signed long long __b) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vmulesd(__a, __b);
+#else
+  return __builtin_altivec_vmulosd(__a, __b);
+#endif
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_mulo(vector unsigned long long __a, vector unsigned long long __b) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vmuleud(__a, __b);
+#else
+  return __builtin_altivec_vmuloud(__a, __b);
+#endif
+}
+#endif
+
 /* vec_vmulosb */
 
 static __inline__ vector short __attribute__((__always_inline__))
@@ -7771,6 +8071,18 @@
   return __builtin_altivec_vrsqrtefp(__a);
 }
 
+/* vec_xvtsqrt */
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_test_swsqrt(vector double __a) {
+  return __builtin_vsx_xvtsqrtdp(__a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_test_swsqrts(vector float __a) {
+  return __builtin_vsx_xvtsqrtsp(__a);
+}
+#endif
+
 /* vec_sel */
 
 #define __builtin_altivec_vsel_4si vec_sel
@@ -13900,6 +14212,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+}
+#endif
+
 /* vec_all_ge */
 
 static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed char __a,
@@ -14071,6 +14395,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_EQ, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_EQ, __b, __a);
+}
+#endif
+
 /* vec_all_gt */
 
 static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed char __a,
@@ -14242,6 +14578,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_LT, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_LT, __a, __b);
+}
+#endif
+
 /* vec_all_in */
 
 static __inline__ int __attribute__((__always_inline__))
@@ -14421,6 +14769,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_le(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_EQ, __a, __b);
+}
+#endif
+
 /* vec_all_lt */
 
 static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed char __a,
@@ -14593,6 +14953,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_LT, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_LT, __b, __a);
+}
+#endif
+
 /* vec_all_nan */
 
 static __inline__ int __ATTRS_o_ai vec_all_nan(vector float __a) {
@@ -14797,6 +15169,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+}
+#endif
+
 /* vec_all_nge */
 
 static __inline__ int __ATTRS_o_ai vec_all_nge(vector float __a,
@@ -15042,6 +15426,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
 /* vec_any_ge */
 
 static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed char __a,
@@ -15221,6 +15617,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_LT_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_LT_REV, __b, __a);
+}
+#endif
+
 /* vec_any_gt */
 
 static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed char __a,
@@ -15400,6 +15808,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_EQ_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_EQ_REV, __a, __b);
+}
+#endif
+
 /* vec_any_le */
 
 static __inline__ int __ATTRS_o_ai vec_any_le(vector signed char __a,
@@ -15579,6 +15999,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_le(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
 /* vec_any_lt */
 
 static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed char __a,
@@ -15758,6 +16190,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpgtsq_p(__CR6_EQ_REV, __b, __a);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpgtuq_p(__CR6_EQ_REV, __b, __a);
+}
+#endif
+
 /* vec_any_nan */
 
 static __inline__ int __attribute__((__always_inline__))
@@ -15953,6 +16397,18 @@
 }
 #endif
 
+#ifdef __POWER10_VECTOR__
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed __int128 __a,
+                                              vector signed __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+}
+
+static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned __int128 __a,
+                                              vector unsigned __int128 __b) {
+  return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
 /* vec_any_nge */
 
 static __inline__ int __attribute__((__always_inline__))
@@ -16353,41 +16809,41 @@
 typedef vector float unaligned_vec_float __attribute__((aligned(1)));
 
 static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset,
-                                                     signed char *__ptr) {
+                                                     const signed char *__ptr) {
   return *(unaligned_vec_schar *)(__ptr + __offset);
 }
 
 static inline __ATTRS_o_ai vector unsigned char
-vec_xl(signed long long __offset, unsigned char *__ptr) {
+vec_xl(signed long long __offset, const unsigned char *__ptr) {
   return *(unaligned_vec_uchar*)(__ptr + __offset);
 }
 
 static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
-                                                      signed short *__ptr) {
+                                                      const signed short *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_sshort *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned short
-vec_xl(signed long long __offset, unsigned short *__ptr) {
+vec_xl(signed long long __offset, const unsigned short *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_ushort *)__addr;
 }
 
 static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
-                                                    signed int *__ptr) {
+                                                    const signed int *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_sint *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
-                                                      unsigned int *__ptr) {
+                                                      const unsigned int *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_uint *)__addr;
 }
 
 static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
-                                               float *__ptr) {
+                                               const float *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_float *)__addr;
 }
@@ -16398,19 +16854,19 @@
 typedef vector double unaligned_vec_double __attribute__((aligned(1)));
 
 static inline __ATTRS_o_ai vector signed long long
-vec_xl(signed long long __offset, signed long long *__ptr) {
+vec_xl(signed long long __offset, const signed long long *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_sll *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned long long
-vec_xl(signed long long __offset, unsigned long long *__ptr) {
+vec_xl(signed long long __offset, const unsigned long long *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_ull *)__addr;
 }
 
 static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
-                                                double *__ptr) {
+                                                const double *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_double *)__addr;
 }
@@ -16421,13 +16877,13 @@
 typedef vector unsigned __int128 unaligned_vec_ui128
     __attribute__((aligned(1)));
 static inline __ATTRS_o_ai vector signed __int128
-vec_xl(signed long long __offset, signed __int128 *__ptr) {
+vec_xl(signed long long __offset, const signed __int128 *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_si128 *)__addr;
 }
 
 static inline __ATTRS_o_ai vector unsigned __int128
-vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
+vec_xl(signed long long __offset, const unsigned __int128 *__ptr) {
   signed char *__addr = (signed char *)__ptr + __offset;
   return *(unaligned_vec_ui128 *)__addr;
 }
@@ -16437,71 +16893,71 @@
 
 #ifdef __LITTLE_ENDIAN__
 static __inline__ vector signed char __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed char *__ptr) {
+vec_xl_be(signed long long __offset, const signed char *__ptr) {
   vector signed char __vec = (vector signed char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
                                  13, 12, 11, 10, 9, 8);
 }
 
 static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned char *__ptr) {
+vec_xl_be(signed long long __offset, const unsigned char *__ptr) {
   vector unsigned char __vec = (vector unsigned char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
                                  13, 12, 11, 10, 9, 8);
 }
 
 static __inline__ vector signed short  __ATTRS_o_ai
-vec_xl_be(signed long long __offset, signed short *__ptr) {
+vec_xl_be(signed long long __offset, const signed short *__ptr) {
   vector signed short __vec = (vector signed short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
 }
 
 static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xl_be(signed long long __offset, unsigned short *__ptr) {
+vec_xl_be(signed long long __offset, const unsigned short *__ptr) {
   vector unsigned short __vec = (vector unsigned short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
   return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
 }
 
 static __inline__ vector signed int __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, signed int *__ptr) {
+vec_xl_be(signed long long  __offset, const signed int *__ptr) {
   return (vector signed int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
 }
 
 static __inline__ vector unsigned int __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, unsigned int *__ptr) {
+vec_xl_be(signed long long  __offset, const unsigned int *__ptr) {
   return (vector unsigned int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
 }
 
 static __inline__ vector float __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, float *__ptr) {
+vec_xl_be(signed long long  __offset, const float *__ptr) {
   return (vector float)__builtin_vsx_lxvw4x_be(__offset, __ptr);
 }
 
 #ifdef __VSX__
 static __inline__ vector signed long long __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, signed long long *__ptr) {
+vec_xl_be(signed long long  __offset, const signed long long *__ptr) {
   return (vector signed long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
 }
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, unsigned long long *__ptr) {
+vec_xl_be(signed long long  __offset, const unsigned long long *__ptr) {
   return (vector unsigned long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
 }
 
 static __inline__ vector double __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, double *__ptr) {
+vec_xl_be(signed long long  __offset, const double *__ptr) {
   return (vector double)__builtin_vsx_lxvd2x_be(__offset, __ptr);
 }
 #endif
 
 #if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
 static __inline__ vector signed __int128 __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, signed __int128 *__ptr) {
+vec_xl_be(signed long long  __offset, const signed __int128 *__ptr) {
   return vec_xl(__offset, __ptr);
 }
 
 static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_be(signed long long  __offset, unsigned __int128 *__ptr) {
+vec_xl_be(signed long long  __offset, const unsigned __int128 *__ptr) {
   return vec_xl(__offset, __ptr);
 }
 #endif
@@ -16509,6 +16965,54 @@
   #define vec_xl_be vec_xl
 #endif
 
+#if defined(__POWER10_VECTOR__) && defined(__VSX__)
+
+/* vec_xl_sext */
+
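+// Each vec_xl_sext overload loads one scalar and sign-extends it into the
+// single 128-bit element; the vec_xl_zext overloads below zero-extend
+// unsigned scalars the same way.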
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(signed long long __offset, const signed char *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(signed long long __offset, const signed short *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(signed long long __offset, const signed int *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_sext(signed long long __offset, const signed long long *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+/* vec_xl_zext */
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(signed long long __offset, const unsigned char *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(signed long long __offset, const unsigned short *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(signed long long __offset, const unsigned int *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_zext(signed long long __offset, const unsigned long long *__pointer) {
+  return (vector unsigned __int128)*(__pointer + __offset);
+}
+
+#endif
+
 /* vec_xst */
 
 static inline __ATTRS_o_ai void vec_xst(vector signed char __vec,
@@ -16597,6 +17101,58 @@
 }
 #endif
 
+/* vec_xst_trunc */
+
+#if defined(__POWER10_VECTOR__) && defined(__VSX__)
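+// Truncating store: only element 0 of the 128-bit vector is narrowed to the
+// pointee type and written at __ptr + __offset.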
+static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
+                                              signed long long __offset,
+                                              signed char *__ptr) {
+  *(__ptr + __offset) = (signed char)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
+                                              signed long long __offset,
+                                              unsigned char *__ptr) {
+  *(__ptr + __offset) = (unsigned char)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
+                                              signed long long __offset,
+                                              signed short *__ptr) {
+  *(__ptr + __offset) = (signed short)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
+                                              signed long long __offset,
+                                              unsigned short *__ptr) {
+  *(__ptr + __offset) = (unsigned short)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
+                                              signed long long __offset,
+                                              signed int *__ptr) {
+  *(__ptr + __offset) = (signed int)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
+                                              signed long long __offset,
+                                              unsigned int *__ptr) {
+  *(__ptr + __offset) = (unsigned int)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
+                                              signed long long __offset,
+                                              signed long long *__ptr) {
+  *(__ptr + __offset) = (signed long long)__vec[0];
+}
+
+static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
+                                              signed long long __offset,
+                                              unsigned long long *__ptr) {
+  *(__ptr + __offset) = (unsigned long long)__vec[0];
+}
+#endif
+
 /* vec_xst_be */
 
 #ifdef __LITTLE_ENDIAN__
@@ -16763,6 +17319,100 @@
 }
 
 #ifdef __POWER10_VECTOR__
+
+/* vec_extractm */
+
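+// Gather the most-significant bit of each element into the low-order bits
+// of the returned mask.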
+static __inline__ unsigned int __ATTRS_o_ai
+vec_extractm(vector unsigned char __a) {
+  return __builtin_altivec_vextractbm(__a);
+}
+
+static __inline__ unsigned int __ATTRS_o_ai
+vec_extractm(vector unsigned short __a) {
+  return __builtin_altivec_vextracthm(__a);
+}
+
+static __inline__ unsigned int __ATTRS_o_ai
+vec_extractm(vector unsigned int __a) {
+  return __builtin_altivec_vextractwm(__a);
+}
+
+static __inline__ unsigned int __ATTRS_o_ai
+vec_extractm(vector unsigned long long __a) {
+  return __builtin_altivec_vextractdm(__a);
+}
+
+static __inline__ unsigned int __ATTRS_o_ai
+vec_extractm(vector unsigned __int128 __a) {
+  return __builtin_altivec_vextractqm(__a);
+}
+
+/* vec_expandm */
+
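+// Each result element is all ones if the most-significant bit of the
+// corresponding source element is set, and all zeros otherwise.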
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_expandm(vector unsigned char __a) {
+  return __builtin_altivec_vexpandbm(__a);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_expandm(vector unsigned short __a) {
+  return __builtin_altivec_vexpandhm(__a);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_expandm(vector unsigned int __a) {
+  return __builtin_altivec_vexpandwm(__a);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_expandm(vector unsigned long long __a) {
+  return __builtin_altivec_vexpanddm(__a);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_expandm(vector unsigned __int128 __a) {
+  return __builtin_altivec_vexpandqm(__a);
+}
+
+/* vec_cntm */
+
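+// _Generic selects the vcntmb[b|h|w|d] builtin that matches the element
+// width of __a; __mp is forwarded as the count-mode immediate.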
+#define vec_cntm(__a, __mp)                                                    \
+  _Generic((__a), vector unsigned char                                         \
+           : __builtin_altivec_vcntmbb((__a), (unsigned int)(__mp)),           \
+             vector unsigned short                                             \
+           : __builtin_altivec_vcntmbh((__a), (unsigned int)(__mp)),           \
+             vector unsigned int                                               \
+           : __builtin_altivec_vcntmbw((__a), (unsigned int)(__mp)),           \
+             vector unsigned long long                                         \
+           : __builtin_altivec_vcntmbd((__a), (unsigned int)(__mp)))
+
+/* vec_gen[b|h|w|d|q]m */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_genbm(unsigned long long __bm) {
+  return __builtin_altivec_mtvsrbm(__bm);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_genhm(unsigned long long __bm) {
+  return __builtin_altivec_mtvsrhm(__bm);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_genwm(unsigned long long __bm) {
+  return __builtin_altivec_mtvsrwm(__bm);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_gendm(unsigned long long __bm) {
+  return __builtin_altivec_mtvsrdm(__bm);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_genqm(unsigned long long __bm) {
+  return __builtin_altivec_mtvsrqm(__bm);
+}
+
 /* vec_pdep */
 
 static __inline__ vector unsigned long long __ATTRS_o_ai
@@ -16881,6 +17531,38 @@
   return __builtin_altivec_vctzdm(__a, __b);
 }
 
+/* vec_mod */
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_mod(vector signed int __a, vector signed int __b) {
+  return __a % __b;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_mod(vector unsigned int __a, vector unsigned int __b) {
+  return __a % __b;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mod(vector signed long long __a, vector signed long long __b) {
+  return __a % __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mod(vector unsigned long long __a, vector unsigned long long __b) {
+  return __a % __b;
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_mod(vector signed __int128 __a, vector signed __int128 __b) {
+  return __a % __b;
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_mod(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return  __a % __b;
+}
+
 /* vec_sldbi */
 
 #define vec_sldb(__a, __b, __c) __builtin_altivec_vsldbi(__a, __b, (__c & 0x7))
@@ -17027,6 +17709,92 @@
 #endif
 }
 
+/* vec_extractl */
+
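+// On big-endian targets the builtins deposit the extracted element into the
+// opposite doubleword, so the result is rotated 8 bytes with vec_sld.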
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl(
+    vector unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextdubvrx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextdubvlx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl(
+    vector unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextduhvrx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextduhvlx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extractl(
+    vector unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextduwvrx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextduwvlx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_extractl(vector unsigned long long __a, vector unsigned long long __b,
+             unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextddvrx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextddvlx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+/* vec_extracth */
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth(
+    vector unsigned char __a, vector unsigned char __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextdubvlx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextdubvrx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth(
+    vector unsigned short __a, vector unsigned short __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextduhvlx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextduhvrx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai vec_extracth(
+    vector unsigned int __a, vector unsigned int __b, unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextduwvlx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextduwvrx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_extracth(vector unsigned long long __a, vector unsigned long long __b,
+             unsigned int __c) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vextddvlx(__a, __b, __c);
+#else
+  vector unsigned long long __ret = __builtin_altivec_vextddvrx(__a, __b, __c);
+  return vec_sld(__ret, __ret, 8);
+#endif
+}
+
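+/* Note (editorial): vec_extractl and vec_extracth wrap the Power10
+ * vextdu[b|h|w|d]v[lr]x instructions. The endian-conditional code above picks
+ * the left- or right-indexed form (and repositions the result with vec_sld on
+ * big-endian) so the intrinsics behave identically on both endiannesses. */
+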
 #ifdef __VSX__
 
 /* vec_permx */
@@ -17095,6 +17863,14 @@
   return __builtin_vsx_xxblendvd(__a, __b, __c);
 }
 
+/* vec_replace_elt */
+
+#define vec_replace_elt __builtin_altivec_vec_replace_elt
+
+/* vec_replace_unaligned */
+
+#define vec_replace_unaligned __builtin_altivec_vec_replace_unaligned
+
 /* vec_splati */
 
 #define vec_splati(__a)                                                        \
@@ -17146,7 +17922,212 @@
 #endif
   return __a;
 }
+
+/* vec_test_lsbb_all_ones */
+
+static __inline__ int __ATTRS_o_ai
+vec_test_lsbb_all_ones(vector unsigned char __a) {
+  return __builtin_vsx_xvtlsbb(__a, 1);
+}
+
+/* vec_test_lsbb_all_zeros */
+
+static __inline__ int __ATTRS_o_ai
+vec_test_lsbb_all_zeros(vector unsigned char __a) {
+  return __builtin_vsx_xvtlsbb(__a, 0);
+}
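+
+/* Usage sketch (illustrative, not part of the upstream header): these
+ * predicates examine the least significant bit of every byte via xvtlsbb:
+ *
+ *   vector unsigned char ones = vec_splats((unsigned char)1);
+ *   vec_test_lsbb_all_ones(ones);  // nonzero: every byte has its LSB set
+ *   vec_test_lsbb_all_zeros(ones); // zero: the LSBs are not all clear
+ */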
 #endif /* __VSX__ */
+
+/* vec_stril */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_stril(vector unsigned char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribr((vector signed char)__a);
+#else
+  return __builtin_altivec_vstribl((vector signed char)__a);
+#endif
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_stril(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribr(__a);
+#else
+  return __builtin_altivec_vstribl(__a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_stril(vector unsigned short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihr((vector signed short)__a);
+#else
+  return __builtin_altivec_vstrihl((vector signed short)__a);
+#endif
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_stril(vector signed short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihr(__a);
+#else
+  return __builtin_altivec_vstrihl(__a);
+#endif
+}
+
+/* vec_stril_p */
+
+static __inline__ int __ATTRS_o_ai vec_stril_p(vector unsigned char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
+#else
+  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
+#else
+  return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_stril_p(vector unsigned short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihr_p(__CR6_EQ, (vector signed short)__a);
+#else
+  return __builtin_altivec_vstrihl_p(__CR6_EQ, (vector signed short)__a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihr_p(__CR6_EQ, __a);
+#else
+  return __builtin_altivec_vstrihl_p(__CR6_EQ, __a);
+#endif
+}
+
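+/* Usage sketch (illustrative, not part of the upstream header): vec_stril
+ * treats the vector as a left-justified string and clears every element from
+ * the first zero (null terminator) onward; vec_stril_p reports whether such a
+ * terminator is present:
+ *
+ *   vector signed char s = {'h', 'i', 0, 'x', 0, 0, 0, 0,
+ *                           0, 0, 0, 0, 0, 0, 0, 0};
+ *   vec_stril(s);   // keeps 'h', 'i'; everything after the first 0 is zeroed
+ *   vec_stril_p(s); // nonzero: a terminating zero element exists
+ */
+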
+/* vec_strir */
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_strir(vector unsigned char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribl((vector signed char)__a);
+#else
+  return __builtin_altivec_vstribr((vector signed char)__a);
+#endif
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_strir(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribl(__a);
+#else
+  return __builtin_altivec_vstribr(__a);
+#endif
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_strir(vector unsigned short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihl((vector signed short)__a);
+#else
+  return __builtin_altivec_vstrihr((vector signed short)__a);
+#endif
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_strir(vector signed short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihl(__a);
+#else
+  return __builtin_altivec_vstrihr(__a);
+#endif
+}
+
+/* vec_strir_p */
+
+static __inline__ int __ATTRS_o_ai vec_strir_p(vector unsigned char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
+#else
+  return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_strir_p(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
+#else
+  return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_strir_p(vector unsigned short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihl_p(__CR6_EQ, (vector signed short)__a);
+#else
+  return __builtin_altivec_vstrihr_p(__CR6_EQ, (vector signed short)__a);
+#endif
+}
+
+static __inline__ int __ATTRS_o_ai vec_strir_p(vector signed short __a) {
+#ifdef __LITTLE_ENDIAN__
+  return __builtin_altivec_vstrihl_p(__CR6_EQ, __a);
+#else
+  return __builtin_altivec_vstrihr_p(__CR6_EQ, __a);
+#endif
+}
+
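+/* Note (editorial): vec_strir and vec_strir_p mirror vec_stril/vec_stril_p
+ * for right-justified strings; as above, the left/right builtin variants are
+ * swapped under __LITTLE_ENDIAN__ so the semantics stay endian-neutral. */
+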
+/* vs[l | r | ra] */
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sl(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __a << (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                                  __CHAR_BIT__));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sl(vector signed __int128 __a, vector unsigned __int128 __b) {
+  return __a << (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                                  __CHAR_BIT__));
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sr(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return __a >> (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                                  __CHAR_BIT__));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sr(vector signed __int128 __a, vector unsigned __int128 __b) {
+  return (vector signed __int128)(
+      ((vector unsigned __int128)__a) >>
+      (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                        __CHAR_BIT__)));
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sra(vector unsigned __int128 __a, vector unsigned __int128 __b) {
+  return (vector unsigned __int128)(
+      ((vector signed __int128)__a) >>
+      (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                        __CHAR_BIT__)));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sra(vector signed __int128 __a, vector unsigned __int128 __b) {
+  return __a >> (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
+                                                  __CHAR_BIT__));
+}
+
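+/* Usage sketch (illustrative, not part of the upstream header): as the bodies
+ * above show, the quadword shifts reduce the count modulo 128
+ * (sizeof(unsigned __int128) * CHAR_BIT), so an out-of-range count wraps
+ * instead of invoking undefined behavior:
+ *
+ *   vector unsigned __int128 x = {1};
+ *   vector unsigned __int128 n = {130};
+ *   vec_sl(x, n); // shifts by 130 % 128 == 2, yielding {4}
+ */
+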
 #endif /* __POWER10_VECTOR__ */
 
 #undef __ATTRS_o_ai
diff --git a/linux-x86/lib64/clang/11.0.5/include/ammintrin.h b/linux-x86/lib64/clang/12.0.1/include/ammintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/ammintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/ammintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/amxintrin.h b/linux-x86/lib64/clang/12.0.1/include/amxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/amxintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/amxintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/arm64intr.h b/linux-x86/lib64/clang/12.0.1/include/arm64intr.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/arm64intr.h
rename to linux-x86/lib64/clang/12.0.1/include/arm64intr.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/arm_acle.h b/linux-x86/lib64/clang/12.0.1/include/arm_acle.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/arm_acle.h
rename to linux-x86/lib64/clang/12.0.1/include/arm_acle.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/arm_bf16.h b/linux-x86/lib64/clang/12.0.1/include/arm_bf16.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/arm_bf16.h
rename to linux-x86/lib64/clang/12.0.1/include/arm_bf16.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/arm_cde.h b/linux-x86/lib64/clang/12.0.1/include/arm_cde.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/arm_cde.h
rename to linux-x86/lib64/clang/12.0.1/include/arm_cde.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/arm_cmse.h b/linux-x86/lib64/clang/12.0.1/include/arm_cmse.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/arm_cmse.h
rename to linux-x86/lib64/clang/12.0.1/include/arm_cmse.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/arm_fp16.h b/linux-x86/lib64/clang/12.0.1/include/arm_fp16.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/arm_fp16.h
rename to linux-x86/lib64/clang/12.0.1/include/arm_fp16.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/arm_mve.h b/linux-x86/lib64/clang/12.0.1/include/arm_mve.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/arm_mve.h
rename to linux-x86/lib64/clang/12.0.1/include/arm_mve.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/arm_neon.h b/linux-x86/lib64/clang/12.0.1/include/arm_neon.h
similarity index 99%
copy from darwin-x86/lib64/clang/11.0.5/include/arm_neon.h
copy to linux-x86/lib64/clang/12.0.1/include/arm_neon.h
index da1e17c..39ef504 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/arm_neon.h
+++ b/linux-x86/lib64/clang/12.0.1/include/arm_neon.h
@@ -45860,9 +45860,9 @@
   __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1);
   return __ret;
 }
-__ai int64_t vceqd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vceqd_s64(__p0, __p1);
+__ai uint64_t vceqd_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqd_s64(__p0, __p1);
   return __ret;
 }
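+/* (Editorial note: scalar comparison intrinsics produce an all-ones/all-zeros
+ * mask, so vceqd_s64 yields UINT64_MAX when its signed operands are equal and
+ * 0 otherwise; that mask semantic is why the result type is uint64_t even for
+ * signed inputs.) */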
 __ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) {
@@ -46252,9 +46252,9 @@
   __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0);
   return __ret;
 }
-__ai int64_t vceqzd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vceqzd_s64(__p0);
+__ai uint64_t vceqzd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vceqzd_s64(__p0);
   return __ret;
 }
 __ai uint64_t vceqzd_f64(float64_t __p0) {
@@ -46333,9 +46333,9 @@
   __ret = (uint64x1_t)(__p0 >= __p1);
   return __ret;
 }
-__ai int64_t vcged_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcged_s64(__p0, __p1);
+__ai uint64_t vcged_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcged_s64(__p0, __p1);
   return __ret;
 }
 __ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) {
@@ -46523,9 +46523,9 @@
 }
 #endif
 
-__ai int64_t vcgezd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcgezd_s64(__p0);
+__ai uint64_t vcgezd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgezd_s64(__p0);
   return __ret;
 }
 __ai uint64_t vcgezd_f64(float64_t __p0) {
@@ -46604,9 +46604,9 @@
   __ret = (uint64x1_t)(__p0 > __p1);
   return __ret;
 }
-__ai int64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
+__ai uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtd_s64(__p0, __p1);
   return __ret;
 }
 __ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) {
@@ -46794,9 +46794,9 @@
 }
 #endif
 
-__ai int64_t vcgtzd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcgtzd_s64(__p0);
+__ai uint64_t vcgtzd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcgtzd_s64(__p0);
   return __ret;
 }
 __ai uint64_t vcgtzd_f64(float64_t __p0) {
@@ -46880,9 +46880,9 @@
   __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1);
   return __ret;
 }
-__ai int64_t vcled_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcled_s64(__p0, __p1);
+__ai uint64_t vcled_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcled_s64(__p0, __p1);
   return __ret;
 }
 __ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) {
@@ -47065,9 +47065,9 @@
 }
 #endif
 
-__ai int64_t vclezd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vclezd_s64(__p0);
+__ai uint64_t vclezd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vclezd_s64(__p0);
   return __ret;
 }
 __ai uint64_t vclezd_f64(float64_t __p0) {
@@ -47151,9 +47151,9 @@
   __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1);
   return __ret;
 }
-__ai int64_t vcltd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcltd_s64(__p0, __p1);
+__ai uint64_t vcltd_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltd_s64(__p0, __p1);
   return __ret;
 }
 __ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) {
@@ -47336,9 +47336,9 @@
 }
 #endif
 
-__ai int64_t vcltzd_s64(int64_t __p0) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vcltzd_s64(__p0);
+__ai uint64_t vcltzd_s64(int64_t __p0) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vcltzd_s64(__p0);
   return __ret;
 }
 __ai uint64_t vcltzd_f64(float64_t __p0) {
@@ -57188,30 +57188,30 @@
 }
 #endif
 
-__ai int16_t vqmovuns_s32(int32_t __p0) {
-  int16_t __ret;
-  __ret = (int16_t) __builtin_neon_vqmovuns_s32(__p0);
+__ai uint16_t vqmovuns_s32(int32_t __p0) {
+  uint16_t __ret;
+  __ret = (uint16_t) __builtin_neon_vqmovuns_s32(__p0);
   return __ret;
 }
-__ai int32_t vqmovund_s64(int64_t __p0) {
-  int32_t __ret;
-  __ret = (int32_t) __builtin_neon_vqmovund_s64(__p0);
+__ai uint32_t vqmovund_s64(int64_t __p0) {
+  uint32_t __ret;
+  __ret = (uint32_t) __builtin_neon_vqmovund_s64(__p0);
   return __ret;
 }
-__ai int8_t vqmovunh_s16(int16_t __p0) {
-  int8_t __ret;
-  __ret = (int8_t) __builtin_neon_vqmovunh_s16(__p0);
+__ai uint8_t vqmovunh_s16(int16_t __p0) {
+  uint8_t __ret;
+  __ret = (uint8_t) __builtin_neon_vqmovunh_s16(__p0);
   return __ret;
 }
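+/* (Editorial note: the vqmovun* intrinsics saturate a signed value into the
+ * narrower unsigned type: negative inputs clamp to 0 and oversized inputs to
+ * the unsigned maximum, e.g. vqmovunh_s16(-5) == 0 and
+ * vqmovunh_s16(300) == 255, hence the unsigned result types.) */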
 #ifdef __LITTLE_ENDIAN__
-__ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
+__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
   uint16x8_t __ret;
   __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1));
   return __ret;
 }
 #else
-__ai uint16x8_t vqmovun_high_s32(int16x4_t __p0, int32x4_t __p1) {
-  int16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
+__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) {
+  uint16x4_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0);
   int32x4_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0);
   uint16x8_t __ret;
   __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1));
@@ -57221,14 +57221,14 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
+__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
   uint32x4_t __ret;
   __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1));
   return __ret;
 }
 #else
-__ai uint32x4_t vqmovun_high_s64(int32x2_t __p0, int64x2_t __p1) {
-  int32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
+__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) {
+  uint32x2_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0);
   int64x2_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0);
   uint32x4_t __ret;
   __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1));
@@ -57238,14 +57238,14 @@
 #endif
 
 #ifdef __LITTLE_ENDIAN__
-__ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
+__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
   uint8x16_t __ret;
   __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1));
   return __ret;
 }
 #else
-__ai uint8x16_t vqmovun_high_s16(int8x8_t __p0, int16x8_t __p1) {
-  int8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
+__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) {
+  uint8x8_t __rev0;  __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0);
   int16x8_t __rev1;  __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0);
   uint8x16_t __ret;
   __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1));
@@ -57549,22 +57549,22 @@
 })
 #endif
 
-__ai uint8_t vqrshlb_u8(uint8_t __p0, uint8_t __p1) {
+__ai uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) {
   uint8_t __ret;
   __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1);
   return __ret;
 }
-__ai uint32_t vqrshls_u32(uint32_t __p0, uint32_t __p1) {
+__ai uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) {
   uint32_t __ret;
   __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1);
   return __ret;
 }
-__ai uint64_t vqrshld_u64(uint64_t __p0, uint64_t __p1) {
+__ai uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) {
   uint64_t __ret;
   __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1);
   return __ret;
 }
-__ai uint16_t vqrshlh_u16(uint16_t __p0, uint16_t __p1) {
+__ai uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) {
   uint16_t __ret;
   __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1);
   return __ret;
@@ -57832,22 +57832,22 @@
   __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \
   __ret; \
 })
-__ai uint8_t vqshlb_u8(uint8_t __p0, uint8_t __p1) {
+__ai uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) {
   uint8_t __ret;
   __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1);
   return __ret;
 }
-__ai uint32_t vqshls_u32(uint32_t __p0, uint32_t __p1) {
+__ai uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) {
   uint32_t __ret;
   __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1);
   return __ret;
 }
-__ai uint64_t vqshld_u64(uint64_t __p0, uint64_t __p1) {
+__ai uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) {
   uint64_t __ret;
   __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1);
   return __ret;
 }
-__ai uint16_t vqshlh_u16(uint16_t __p0, uint16_t __p1) {
+__ai uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) {
   uint16_t __ret;
   __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1);
   return __ret;
@@ -59452,7 +59452,7 @@
   __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0);
   return __ret;
 }
-__ai uint64_t vrshld_u64(uint64_t __p0, uint64_t __p1) {
+__ai uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) {
   uint64_t __ret;
   __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1);
   return __ret;
@@ -59853,7 +59853,7 @@
   __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \
   __ret; \
 })
-__ai uint64_t vshld_u64(uint64_t __p0, uint64_t __p1) {
+__ai uint64_t vshld_u64(uint64_t __p0, int64_t __p1) {
   uint64_t __ret;
   __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1);
   return __ret;
@@ -62423,9 +62423,9 @@
   __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1);
   return __ret;
 }
-__ai int64_t vtstd_s64(int64_t __p0, int64_t __p1) {
-  int64_t __ret;
-  __ret = (int64_t) __builtin_neon_vtstd_s64(__p0, __p1);
+__ai uint64_t vtstd_s64(int64_t __p0, int64_t __p1) {
+  uint64_t __ret;
+  __ret = (uint64_t) __builtin_neon_vtstd_s64(__p0, __p1);
   return __ret;
 }
 __ai int8_t vuqaddb_s8(int8_t __p0, uint8_t __p1) {
diff --git a/linux-x86/lib64/clang/11.0.5/include/arm_sve.h b/linux-x86/lib64/clang/12.0.1/include/arm_sve.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/arm_sve.h
rename to linux-x86/lib64/clang/12.0.1/include/arm_sve.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/armintr.h b/linux-x86/lib64/clang/12.0.1/include/armintr.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/armintr.h
rename to linux-x86/lib64/clang/12.0.1/include/armintr.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx2intrin.h b/linux-x86/lib64/clang/12.0.1/include/avx2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx2intrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx2intrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512bf16intrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512bf16intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512bf16intrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512bf16intrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512bitalgintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512bitalgintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512bitalgintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512bitalgintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512bwintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512bwintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512bwintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512bwintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512cdintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512cdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512cdintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512cdintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512dqintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512dqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512dqintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512dqintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512erintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512erintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512erintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512erintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512fintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512fintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512fintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512fintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512ifmaintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512ifmaintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512ifmaintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512ifmaintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512ifmavlintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512ifmavlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512ifmavlintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512ifmavlintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512pfintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512pfintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512pfintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512pfintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vbmi2intrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vbmi2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vbmi2intrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vbmi2intrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vbmiintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vbmiintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vbmiintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vbmiintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vbmivlintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vbmivlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vbmivlintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vbmivlintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vlbf16intrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vlbf16intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vlbf16intrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vlbf16intrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vlbitalgintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vlbitalgintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vlbitalgintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vlbitalgintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vlbwintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vlbwintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vlbwintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vlbwintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vlcdintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vlcdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vlcdintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vlcdintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vldqintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vldqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vldqintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vldqintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vlintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vlintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vlintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vlvbmi2intrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vlvbmi2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vlvbmi2intrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vlvbmi2intrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vlvnniintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vlvnniintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vlvnniintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vlvnniintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vlvp2intersectintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vlvp2intersectintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vlvp2intersectintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vlvp2intersectintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vnniintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vnniintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vnniintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vnniintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vp2intersectintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vp2intersectintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vp2intersectintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vp2intersectintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vpopcntdqintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vpopcntdqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vpopcntdqintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vpopcntdqintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avx512vpopcntdqvlintrin.h b/linux-x86/lib64/clang/12.0.1/include/avx512vpopcntdqvlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avx512vpopcntdqvlintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avx512vpopcntdqvlintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/avxintrin.h b/linux-x86/lib64/clang/12.0.1/include/avxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/avxintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/avxintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/bits/stdatomic.h b/linux-x86/lib64/clang/12.0.1/include/bits/stdatomic.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/bits/stdatomic.h
rename to linux-x86/lib64/clang/12.0.1/include/bits/stdatomic.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/bmi2intrin.h b/linux-x86/lib64/clang/12.0.1/include/bmi2intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/bmi2intrin.h
rename to linux-x86/lib64/clang/12.0.1/include/bmi2intrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/bmiintrin.h b/linux-x86/lib64/clang/12.0.1/include/bmiintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/bmiintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/bmiintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/cet.h b/linux-x86/lib64/clang/12.0.1/include/cet.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/cet.h
rename to linux-x86/lib64/clang/12.0.1/include/cet.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/cetintrin.h b/linux-x86/lib64/clang/12.0.1/include/cetintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/cetintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/cetintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/cldemoteintrin.h b/linux-x86/lib64/clang/12.0.1/include/cldemoteintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/cldemoteintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/cldemoteintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/clflushoptintrin.h b/linux-x86/lib64/clang/12.0.1/include/clflushoptintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/clflushoptintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/clflushoptintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/clwbintrin.h b/linux-x86/lib64/clang/12.0.1/include/clwbintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/clwbintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/clwbintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/clzerointrin.h b/linux-x86/lib64/clang/12.0.1/include/clzerointrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/clzerointrin.h
rename to linux-x86/lib64/clang/12.0.1/include/clzerointrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/cpuid.h b/linux-x86/lib64/clang/12.0.1/include/cpuid.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/cpuid.h
rename to linux-x86/lib64/clang/12.0.1/include/cpuid.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/cuda_wrappers/algorithm b/linux-x86/lib64/clang/12.0.1/include/cuda_wrappers/algorithm
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/cuda_wrappers/algorithm
rename to linux-x86/lib64/clang/12.0.1/include/cuda_wrappers/algorithm
diff --git a/linux-x86/lib64/clang/11.0.5/include/cuda_wrappers/complex b/linux-x86/lib64/clang/12.0.1/include/cuda_wrappers/complex
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/cuda_wrappers/complex
rename to linux-x86/lib64/clang/12.0.1/include/cuda_wrappers/complex
diff --git a/linux-x86/lib64/clang/11.0.5/include/cuda_wrappers/new b/linux-x86/lib64/clang/12.0.1/include/cuda_wrappers/new
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/cuda_wrappers/new
rename to linux-x86/lib64/clang/12.0.1/include/cuda_wrappers/new
diff --git a/linux-x86/lib64/clang/11.0.5/include/emmintrin.h b/linux-x86/lib64/clang/12.0.1/include/emmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/emmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/emmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/enqcmdintrin.h b/linux-x86/lib64/clang/12.0.1/include/enqcmdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/enqcmdintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/enqcmdintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/f16cintrin.h b/linux-x86/lib64/clang/12.0.1/include/f16cintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/f16cintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/f16cintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/float.h b/linux-x86/lib64/clang/12.0.1/include/float.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/float.h
rename to linux-x86/lib64/clang/12.0.1/include/float.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/fma4intrin.h b/linux-x86/lib64/clang/12.0.1/include/fma4intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/fma4intrin.h
rename to linux-x86/lib64/clang/12.0.1/include/fma4intrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/fmaintrin.h b/linux-x86/lib64/clang/12.0.1/include/fmaintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/fmaintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/fmaintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/fuzzer/FuzzedDataProvider.h b/linux-x86/lib64/clang/12.0.1/include/fuzzer/FuzzedDataProvider.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/fuzzer/FuzzedDataProvider.h
rename to linux-x86/lib64/clang/12.0.1/include/fuzzer/FuzzedDataProvider.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/fxsrintrin.h b/linux-x86/lib64/clang/12.0.1/include/fxsrintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/fxsrintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/fxsrintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/gfniintrin.h b/linux-x86/lib64/clang/12.0.1/include/gfniintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/gfniintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/gfniintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/htmintrin.h b/linux-x86/lib64/clang/12.0.1/include/htmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/htmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/htmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/htmxlintrin.h b/linux-x86/lib64/clang/12.0.1/include/htmxlintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/htmxlintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/htmxlintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/ia32intrin.h b/linux-x86/lib64/clang/12.0.1/include/ia32intrin.h
similarity index 78%
copy from darwin-x86/lib64/clang/11.0.5/include/ia32intrin.h
copy to linux-x86/lib64/clang/12.0.1/include/ia32intrin.h
index 79b7f06..00138ef 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/ia32intrin.h
+++ b/linux-x86/lib64/clang/12.0.1/include/ia32intrin.h
@@ -14,6 +14,18 @@
 #ifndef __IA32INTRIN_H
 #define __IA32INTRIN_H
 
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__))
+#define __DEFAULT_FN_ATTRS_SSE42 __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS_CAST __attribute__((__always_inline__)) constexpr
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS_CAST __attribute__((__always_inline__))
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
+
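+/* Illustrative note (editorial, not part of the header): in C++11 and later
+ * the _CAST/_CONSTEXPR attribute variants add `constexpr`, so the helpers
+ * below can fold in constant expressions, e.g.
+ *   static_assert(__bswapd(0x11223344) == 0x44332211, "byte swap");
+ */
+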
 /** Find the first set bit starting from the lsb. Result is undefined if
  *  input is 0.
  *
@@ -26,7 +38,7 @@
  *     A 32-bit integer operand.
  *  \returns A 32-bit integer containing the bit number.
  */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsfd(int __A) {
   return __builtin_ctz(__A);
 }
@@ -43,7 +55,7 @@
  *     A 32-bit integer operand.
  *  \returns A 32-bit integer containing the bit number.
  */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsrd(int __A) {
   return 31 - __builtin_clz(__A);
 }
@@ -59,12 +71,12 @@
  *     A 32-bit integer operand.
  *  \returns A 32-bit integer containing the swapped bytes.
  */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bswapd(int __A) {
   return __builtin_bswap32(__A);
 }
 
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 _bswap(int __A) {
   return __builtin_bswap32(__A);
 }
@@ -85,7 +97,7 @@
  *     A 64-bit integer operand.
  *  \returns A 32-bit integer containing the bit number.
  */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsfq(long long __A) {
   return __builtin_ctzll(__A);
 }
@@ -102,7 +114,7 @@
  *     A 64-bit integer operand.
  *  \returns A 32-bit integer containing the bit number.
  */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __bsrq(long long __A) {
   return 63 - __builtin_clzll(__A);
 }
@@ -118,7 +130,7 @@
  *     A 64-bit integer operand.
  *  \returns A 64-bit integer containing the swapped bytes.
  */
-static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
 __bswapq(long long __A) {
   return __builtin_bswap64(__A);
 }
@@ -138,7 +150,7 @@
  *  \returns A 32-bit integer containing the number of bits with value 1 in the
  *     source operand.
  */
-static __inline__ int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 __popcntd(unsigned int __A)
 {
   return __builtin_popcount(__A);
@@ -159,7 +171,7 @@
  *  \returns A 64-bit integer containing the number of bits with value 1 in the
  *     source operand.
  */
-static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
 __popcntq(unsigned long long __A)
 {
   return __builtin_popcountll(__A);
@@ -169,26 +181,26 @@
 #endif /* __x86_64__ */
 
 #ifdef __x86_64__
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
 __readeflags(void)
 {
   return __builtin_ia32_readeflags_u64();
 }
 
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __DEFAULT_FN_ATTRS
 __writeeflags(unsigned long long __f)
 {
   __builtin_ia32_writeeflags_u64(__f);
 }
 
 #else /* !__x86_64__ */
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
 __readeflags(void)
 {
   return __builtin_ia32_readeflags_u32();
 }
 
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __DEFAULT_FN_ATTRS
 __writeeflags(unsigned int __f)
 {
   __builtin_ia32_writeeflags_u32(__f);
@@ -205,11 +217,9 @@
  *     A 32-bit float value.
  *  \returns a 32-bit unsigned integer containing the converted value.
  */
-static __inline__ unsigned int __attribute__((__always_inline__))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_CAST
 _castf32_u32(float __A) {
-  unsigned int D;
-  __builtin_memcpy(&D, &__A, sizeof(__A));
-  return D;
+  return __builtin_bit_cast(unsigned int, __A);
 }
 
 /** Cast a 64-bit float value to a 64-bit unsigned integer value
@@ -222,11 +232,9 @@
  *     A 64-bit float value.
  *  \returns a 64-bit unsigned integer containing the converted value.
  */
-static __inline__ unsigned long long __attribute__((__always_inline__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CAST
 _castf64_u64(double __A) {
-  unsigned long long D;
-  __builtin_memcpy(&D, &__A, sizeof(__A));
-  return D;
+  return __builtin_bit_cast(unsigned long long, __A);
 }
 
 /** Cast a 32-bit unsigned integer value to a 32-bit float value
@@ -239,11 +247,9 @@
  *     A 32-bit unsigned integer value.
  *  \returns a 32-bit float value containing the converted value.
  */
-static __inline__ float __attribute__((__always_inline__))
+static __inline__ float __DEFAULT_FN_ATTRS_CAST
 _castu32_f32(unsigned int __A) {
-  float D;
-  __builtin_memcpy(&D, &__A, sizeof(__A));
-  return D;
+  return __builtin_bit_cast(float, __A);
 }
 
 /** Cast a 64-bit unsigned integer value to a 64-bit float value
@@ -256,11 +262,9 @@
  *     A 64-bit unsigned integer value.
  *  \returns a 64-bit float value containing the converted value.
  */
-static __inline__ double __attribute__((__always_inline__))
+static __inline__ double __DEFAULT_FN_ATTRS_CAST
 _castu64_f64(unsigned long long __A) {
-  double D;
-  __builtin_memcpy(&D, &__A, sizeof(__A));
-  return D;
+  return __builtin_bit_cast(double, __A);
 }
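 
+/* Illustrative note (editorial, not part of the header): __builtin_bit_cast
+ * reinterprets the object representation like C++20 std::bit_cast and, unlike
+ * the old memcpy idiom, is usable in constant expressions, e.g. in C++:
+ *   static_assert(_castf32_u32(1.0f) == 0x3f800000u, "IEEE-754 bits");
+ */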
 
 /** Adds the unsigned integer operand to the CRC-32C checksum of the
@@ -278,7 +282,7 @@
  *  \returns The result of adding operand \a __C to the CRC-32C checksum of
  *     operand \a __D.
  */
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_SSE42
 __crc32b(unsigned int __C, unsigned char __D)
 {
   return __builtin_ia32_crc32qi(__C, __D);
@@ -299,7 +303,7 @@
  *  \returns The result of adding operand \a __C to the CRC-32C checksum of
  *     operand \a __D.
  */
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_SSE42
 __crc32w(unsigned int __C, unsigned short __D)
 {
   return __builtin_ia32_crc32hi(__C, __D);
@@ -320,7 +324,7 @@
  *  \returns The result of adding operand \a __C to the CRC-32C checksum of
  *     operand \a __D.
  */
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_SSE42
 __crc32d(unsigned int __C, unsigned int __D)
 {
   return __builtin_ia32_crc32si(__C, __D);
@@ -342,20 +346,20 @@
  *  \returns The result of adding operand \a __C to the CRC-32C checksum of
  *     operand \a __D.
  */
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS_SSE42
 __crc32q(unsigned long long __C, unsigned long long __D)
 {
   return __builtin_ia32_crc32di(__C, __D);
 }
 #endif /* __x86_64__ */
 
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
 __rdpmc(int __A) {
   return __builtin_ia32_rdpmc(__A);
 }
 
 /* __rdtscp */
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
 __rdtscp(unsigned int *__A) {
   return __builtin_ia32_rdtscp(__A);
 }
@@ -364,48 +368,48 @@
 
 #define _rdpmc(A) __rdpmc(A)
 
-static __inline__ void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __DEFAULT_FN_ATTRS
 _wbinvd(void) {
   __builtin_ia32_wbinvd();
 }
 
-static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned char __DEFAULT_FN_ATTRS_CONSTEXPR
 __rolb(unsigned char __X, int __C) {
   return __builtin_rotateleft8(__X, __C);
 }
 
-static __inline__ unsigned char __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned char __DEFAULT_FN_ATTRS_CONSTEXPR
 __rorb(unsigned char __X, int __C) {
   return __builtin_rotateright8(__X, __C);
 }
 
-static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned short __DEFAULT_FN_ATTRS_CONSTEXPR
 __rolw(unsigned short __X, int __C) {
   return __builtin_rotateleft16(__X, __C);
 }
 
-static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned short __DEFAULT_FN_ATTRS_CONSTEXPR
 __rorw(unsigned short __X, int __C) {
   return __builtin_rotateright16(__X, __C);
 }
 
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR
 __rold(unsigned int __X, int __C) {
   return __builtin_rotateleft32(__X, __C);
 }
 
-static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR
 __rord(unsigned int __X, int __C) {
   return __builtin_rotateright32(__X, __C);
 }
 
 #ifdef __x86_64__
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR
 __rolq(unsigned long long __X, int __C) {
   return __builtin_rotateleft64(__X, __C);
 }
 
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR
 __rorq(unsigned long long __X, int __C) {
   return __builtin_rotateright64(__X, __C);
 }
@@ -429,4 +433,9 @@
 #define _rotwl(a,b) __rolw((a), (b))
 #define _rotwr(a,b) __rorw((a), (b))
 
+#undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_CAST
+#undef __DEFAULT_FN_ATTRS_SSE42
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
+
 #endif /* __IA32INTRIN_H */
diff --git a/darwin-x86/lib64/clang/11.0.5/include/immintrin.h b/linux-x86/lib64/clang/12.0.1/include/immintrin.h
similarity index 98%
copy from darwin-x86/lib64/clang/11.0.5/include/immintrin.h
copy to linux-x86/lib64/clang/12.0.1/include/immintrin.h
index e9dff23..8fb5447 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/immintrin.h
+++ b/linux-x86/lib64/clang/12.0.1/include/immintrin.h
@@ -472,6 +472,11 @@
 #endif
 
 #if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__KL__) || defined(__WIDEKL__)
+#include <keylockerintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
     defined(__AMXTILE__) || defined(__AMXINT8__) || defined(__AMXBF16__)
 #include <amxintrin.h>
 #endif
diff --git a/linux-x86/lib64/clang/11.0.5/include/intrin.h b/linux-x86/lib64/clang/12.0.1/include/intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/intrin.h
rename to linux-x86/lib64/clang/12.0.1/include/intrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/inttypes.h b/linux-x86/lib64/clang/12.0.1/include/inttypes.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/inttypes.h
rename to linux-x86/lib64/clang/12.0.1/include/inttypes.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/invpcidintrin.h b/linux-x86/lib64/clang/12.0.1/include/invpcidintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/invpcidintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/invpcidintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/iso646.h b/linux-x86/lib64/clang/12.0.1/include/iso646.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/iso646.h
rename to linux-x86/lib64/clang/12.0.1/include/iso646.h
diff --git a/linux-x86/lib64/clang/12.0.1/include/keylockerintrin.h b/linux-x86/lib64/clang/12.0.1/include/keylockerintrin.h
new file mode 100644
index 0000000..c15d39c
--- /dev/null
+++ b/linux-x86/lib64/clang/12.0.1/include/keylockerintrin.h
@@ -0,0 +1,506 @@
+/*===----------------- keylockerintrin.h - KL Intrinsics -------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <keylockerintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef _KEYLOCKERINTRIN_H
+#define _KEYLOCKERINTRIN_H
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__KL__)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("kl"),\
+                 __min_vector_width__(128)))
+
+/// Load the internal wrapping key (IWKey) from __intkey, __enkey_lo and
+/// __enkey_hi. __ctl will be assigned to EAX, which specifies the KeySource
+/// and whether backing up the key is permitted. The 256-bit encryption key is
+/// loaded from the two explicit operands (__enkey_lo and __enkey_hi). The
+/// 128-bit integrity key is loaded from the implicit operand XMM0, which is
+/// assigned by __intkey.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> LOADIWKEY </c> instructions.
+///
+/// \operation
+/// IF CPL > 0 // LOADIWKEY only allowed at ring 0 (supervisor mode)
+///   GP (0)
+/// FI
+/// IF "LOADIWKEY exiting" VM execution control set
+///   VMexit
+/// FI
+/// IF __ctl[4:1] > 1 // Reserved KeySource encoding used
+///   GP (0)
+/// FI
+/// IF __ctl[31:5] != 0 // Reserved bit in __ctl is set
+///   GP (0)
+/// FI
+/// IF __ctl[0] AND (CPUID.19H.ECX[0] == 0) // NoBackup is not supported on this part
+///   GP (0)
+/// FI
+/// IF (__ctl[4:1] == 1) AND (CPUID.19H.ECX[1] == 0) // KeySource of 1 is not supported on this part
+///   GP (0)
+/// FI
+/// IF (__ctl[4:1] == 0) // KeySource of 0.
+///   IWKey.Encryption Key[127:0] := __enkey_hi[127:0]
+///   IWKey.Encryption Key[255:128] := __enkey_lo[127:0]
+///   IWKey.IntegrityKey[127:0] := __intkey[127:0]
+///   IWKey.NoBackup := __ctl[0]
+///   IWKey.KeySource := __ctl[4:1]
+///   ZF := 0
+/// ELSE // KeySource of 1. See RDSEED definition for details of randomness
+///   IF HW_NRND_GEN.ready == 1 // Full-entropy random data from RDSEED was received
+///     IWKey.Encryption Key[127:0] := __enkey_hi[127:0] XOR HW_NRND_GEN.data[127:0]
+///     IWKey.Encryption Key[255:128] := __enkey_lo[127:0] XOR HW_NRND_GEN.data[255:128]
+///     IWKey.IntegrityKey[127:0] := __intkey[127:0] XOR HW_NRND_GEN.data[383:256]
+///     IWKey.NoBackup := __ctl[0]
+///     IWKey.KeySource := __ctl[4:1]
+///     ZF := 0
+///   ELSE // Random data was not returned from RDSEED. IWKey was not loaded
+///     ZF := 1
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_loadiwkey (unsigned int __ctl, __m128i __intkey,
+               __m128i __enkey_lo, __m128i __enkey_hi) {
+  __builtin_ia32_loadiwkey (__intkey, __enkey_lo, __enkey_hi, __ctl);
+}
+
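+/* Usage sketch (illustrative, not part of the header; LOADIWKEY faults
+ * outside ring 0, so this is only meaningful in kernel-mode code on a Key
+ * Locker capable CPU, with intkey/enkey_lo/enkey_hi prepared by the caller):
+ *   _mm_loadiwkey(0, intkey, enkey_lo, enkey_hi); // KeySource 0, backup OK
+ */
+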
+/// Wrap a 128-bit AES key from __key into a key handle, store the handle in
+/// the six __m128i locations from ((__m128i*)__h) to ((__m128i*)__h) + 5, and
+/// return a 32-bit status value. The explicit source operand __htype
+/// specifies handle restrictions.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> ENCODEKEY128 </c> instructions.
+///
+/// \operation
+/// InputKey[127:0] := __key[127:0]
+/// KeyMetadata[2:0] := __htype[2:0]
+/// KeyMetadata[23:3] := 0 // Reserved for future usage
+/// KeyMetadata[27:24] := 0 // KeyType is AES-128 (value of 0)
+/// KeyMetadata[127:28] := 0 // Reserved for future usage
+/// Handle[383:0] := WrapKey128(InputKey[127:0], KeyMetadata[127:0],
+///                  IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0])
+/// dst[0] := IWKey.NoBackup
+/// dst[4:1] := IWKey.KeySource[3:0]
+/// dst[31:5] := 0
+/// MEM[__h+127:__h] := Handle[127:0]   // AAD
+/// MEM[__h+255:__h+128] := Handle[255:128] // Integrity Tag
+/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText
+/// MEM[__h+511:__h+384] := 0 // Reserved for future usage
+/// MEM[__h+639:__h+512] := 0 // Reserved for future usage
+/// MEM[__h+767:__h+640] := 0 // Reserved for future usage
+/// OF := 0
+/// SF := 0
+/// ZF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) {
+  return __builtin_ia32_encodekey128_u32(__htype, (__v2di)__key, __h);
+}
+
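+/* Usage sketch (illustrative, not part of the header; raw_key is a
+ * caller-supplied __m128i): the handle buffer must hold six __m128i values,
+ * since the operation writes MEM[__h] through MEM[__h+767]:
+ *   __m128i handle[6];
+ *   unsigned int iwkey_info = _mm_encodekey128_u32(0, raw_key, handle);
+ */
+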
+/// Wrap a 256-bit AES key from __key_hi:__key_lo into a key handle, store the
+/// handle in the seven __m128i locations from ((__m128i*)__h) to
+/// ((__m128i*)__h) + 6, and return a 32-bit status value. The explicit source
+/// operand __htype specifies handle restrictions.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> ENCODEKEY256 </c> instructions.
+///
+/// \operation
+/// InputKey[127:0] := __key_lo[127:0]
+/// InputKey[255:128] := __key_hi[255:128]
+/// KeyMetadata[2:0] := __htype[2:0]
+/// KeyMetadata[23:3] := 0 // Reserved for future usage
+/// KeyMetadata[27:24] := 1 // KeyType is AES-256 (value of 1)
+/// KeyMetadata[127:28] := 0 // Reserved for future usage
+/// Handle[511:0] := WrapKey256(InputKey[255:0], KeyMetadata[127:0],
+///                  IWKey.Integrity Key[127:0], IWKey.Encryption Key[255:0])
+/// dst[0] := IWKey.NoBackup
+/// dst[4:1] := IWKey.KeySource[3:0]
+/// dst[31:5] := 0
+/// MEM[__h+127:__h]   := Handle[127:0] // AAD
+/// MEM[__h+255:__h+128] := Handle[255:128] // Tag
+/// MEM[__h+383:__h+256] := Handle[383:256] // CipherText[127:0]
+/// MEM[__h+511:__h+384] := Handle[511:384] // CipherText[255:128]
+/// MEM[__h+639:__h+512] := 0 // Reserved for future usage
+/// MEM[__h+767:__h+640] := 0 // Reserved for future usage
+/// MEM[__h+895:__h+768] := 0 // Reserved for future usage
+/// OF := 0
+/// SF := 0
+/// ZF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi,
+                     void *__h) {
+  return __builtin_ia32_encodekey256_u32(__htype, (__v2di)__key_lo,
+                                         (__v2di)__key_hi, __h);
+}
+
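+/* Illustrative sketch: the 256-bit form writes seven 128-bit blocks, so its
+ * output buffer is 112 bytes rather than 96:
+ *
+ *   __m128i handle256[7];  // 112-byte handle output area
+ *   unsigned int info = _mm_encodekey256_u32(0, key_lo, key_hi, handle256);
+ *
+ * key_lo/key_hi hold the low and high 128 bits of the AES-256 key, and 0 for
+ * __htype again assumes the "no restrictions" encoding. */
+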
+/// The AESENC128KL instruction performs 10 rounds of AES to encrypt __idata
+/// using the 128-bit key wrapped in the handle at __h. It stores the result
+/// in __odata and returns the resulting ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESENC128KL </c> instruction.
+///
+/// \operation
+/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
+/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[383:256] ||
+///                    HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
+///   IF (Authentic == 0)
+///     ZF := 1
+///   ELSE
+///     MEM[__odata+127:__odata] := AES128Encrypt (__idata[127:0], UnwrappedKey)
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
+  return __builtin_ia32_aesenc128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
+}
+
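+/* Illustrative sketch: encrypting one block with a wrapped key. The return
+ * value mirrors ZF, so nonzero means the handle was illegal or failed
+ * authentication and __odata must not be used:
+ *
+ *   __m128i pt = _mm_set1_epi8(0x41), ct;
+ *   if (_mm_aesenc128kl_u8(&ct, pt, handle))
+ *     abort();  // bad or inauthentic handle
+ *
+ * handle is assumed to be the buffer produced by _mm_encodekey128_u32. */
+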
+/// The AESENC256KL instruction performs 14 rounds of AES to encrypt __idata
+/// using the 256-bit key wrapped in the handle at __h. It stores the result
+/// in __odata and returns the resulting ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESENC256KL </c> instruction.
+///
+/// \operation
+/// Handle[511:0] := MEM[__h+511:__h] // Load is not guaranteed to be atomic.
+/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[255:128] ||
+///                    HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
+///   IF (Authentic == 0)
+///     ZF := 1
+///   ELSE
+///     MEM[__odata+127:__odata] := AES256Encrypt (__idata[127:0], UnwrappedKey)
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
+  return __builtin_ia32_aesenc256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
+}
+
+/// The AESDEC128KL instruction performs 10 rounds of AES to decrypt __idata
+/// using the 128-bit key wrapped in the handle at __h. It stores the result
+/// in __odata and returns the resulting ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESDEC128KL </c> instruction.
+///
+/// \operation
+/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
+/// IllegalHandle := (HandleReservedBitSet (Handle[383:0]) ||
+///                  (Handle[127:0] AND (CPL > 0)) ||
+///                  Handle[383:256] ||
+///                  HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128)
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
+///   IF (Authentic == 0)
+///     ZF := 1
+///   ELSE
+///     MEM[__odata+127:__odata] := AES128Decrypt (__idata[127:0], UnwrappedKey)
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
+  return __builtin_ia32_aesdec128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
+}
+
+/// The AESDEC256KL instruction performs 14 rounds of AES to decrypt __idata
+/// using the 256-bit key wrapped in the handle at __h. It stores the result
+/// in __odata and returns the resulting ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESDEC256KL </c> instruction.
+///
+/// \operation
+/// Handle[511:0] := MEM[__h+511:__h]
+/// IllegalHandle := (HandleReservedBitSet (Handle[511:0]) ||
+///                   (Handle[127:0] AND (CPL > 0)) ||
+///                   Handle[383:256] ||
+///                   HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256)
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
+///   IF (Authentic == 0)
+///     ZF := 1
+///   ELSE
+///     MEM[__odata+127:__odata] := AES256Decrypt (__idata[127:0], UnwrappedKey)
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
+  return __builtin_ia32_aesdec256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \
+          || defined(__KL__) */
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) ||      \
+    defined(__WIDEKL__)
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS \
+  __attribute__((__always_inline__, __nodebug__, __target__("kl,widekl"),\
+                 __min_vector_width__(128)))
+
+/// Encrypt __idata[0] through __idata[7] using the 128-bit AES key indicated
+/// by the handle at __h, store the resulting blocks in __odata[0] through
+/// __odata[7], and return the ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESENCWIDE128KL </c> instruction.
+///
+/// \operation
+/// Handle[383:0] := MEM[__h+383:__h]
+/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[255:128] ||
+///                    HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
+///   IF Authentic == 0
+///     ZF := 1
+///   ELSE
+///     FOR i := 0 to 7
+///       __odata[i] := AES128Encrypt (__idata[i], UnwrappedKey)
+///     ENDFOR
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
+  return __builtin_ia32_aesencwide128kl_u8((__v2di *)__odata,
+                                           (const __v2di *)__idata, __h);
+}
+
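+/* Illustrative sketch: the wide form encrypts eight independent blocks per
+ * invocation, amortizing a single handle unwrap and authentication across
+ * all eight:
+ *
+ *   __m128i in[8], out[8];  // in[] assumed already initialized
+ *   if (_mm_aesencwide128kl_u8(out, in, handle))
+ *     abort();  // bad or inauthentic handle
+ */
+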
+/// Encrypt __idata[0] through __idata[7] using the 256-bit AES key indicated
+/// by the handle at __h, store the resulting blocks in __odata[0] through
+/// __odata[7], and return the ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESENCWIDE256KL </c> instruction.
+///
+/// \operation
+/// Handle[511:0] := MEM[__h+511:__h]
+/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[255:128] ||
+///                    HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
+///   IF Authentic == 0
+///     ZF := 1
+///   ELSE
+///     FOR i := 0 to 7
+///       __odata[i] := AES256Encrypt (__idata[i], UnwrappedKey)
+///     ENDFOR
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
+  return __builtin_ia32_aesencwide256kl_u8((__v2di *)__odata,
+                                           (const __v2di *)__idata, __h);
+}
+
+/// Decrypt __idata[0] through __idata[7] using the 128-bit AES key indicated
+/// by the handle at __h, store the resulting blocks in __odata[0] through
+/// __odata[7], and return the ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESDECWIDE128KL </c> instruction.
+///
+/// \operation
+/// Handle[383:0] := MEM[__h+383:__h]
+/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[255:128] ||
+///                    HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
+///   IF Authentic == 0
+///     ZF := 1
+///   ELSE
+///     FOR i := 0 to 7
+///       __odata[i] := AES128Decrypt (__idata[i], UnwrappedKey)
+///     ENDFOR
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
+  return __builtin_ia32_aesdecwide128kl_u8((__v2di *)__odata,
+                                           (const __v2di *)__idata, __h);
+}
+
+/// Decrypt __idata[0] through __idata[7] using the 256-bit AES key indicated
+/// by the handle at __h, store the resulting blocks in __odata[0] through
+/// __odata[7], and return the ZF flag status.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> AESDECWIDE256KL </c> instruction.
+///
+/// \operation
+/// Handle[511:0] := MEM[__h+511:__h]
+/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
+///                    (Handle[127:0] AND (CPL > 0)) ||
+///                    Handle[255:128] ||
+///                    HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )
+/// IF (IllegalHandle)
+///   ZF := 1
+/// ELSE
+///   (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
+///   IF Authentic == 0
+///     ZF := 1
+///   ELSE
+///     FOR i := 0 to 7
+///       __odata[i] := AES256Decrypt (__idata[i], UnwrappedKey)
+///     ENDFOR
+///     ZF := 0
+///   FI
+/// FI
+/// dst := ZF
+/// OF := 0
+/// SF := 0
+/// AF := 0
+/// PF := 0
+/// CF := 0
+/// \endoperation
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
+  return __builtin_ia32_aesdecwide256kl_u8((__v2di *)__odata,
+                                           (const __v2di *)__idata, __h);
+}
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \
+          || defined(__WIDEKL__) */
+
+#endif /* _KEYLOCKERINTRIN_H */
diff --git a/linux-x86/lib64/clang/11.0.5/include/limits.h b/linux-x86/lib64/clang/12.0.1/include/limits.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/limits.h
rename to linux-x86/lib64/clang/12.0.1/include/limits.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/lwpintrin.h b/linux-x86/lib64/clang/12.0.1/include/lwpintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/lwpintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/lwpintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/lzcntintrin.h b/linux-x86/lib64/clang/12.0.1/include/lzcntintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/lzcntintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/lzcntintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/mm3dnow.h b/linux-x86/lib64/clang/12.0.1/include/mm3dnow.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/mm3dnow.h
rename to linux-x86/lib64/clang/12.0.1/include/mm3dnow.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/mm_malloc.h b/linux-x86/lib64/clang/12.0.1/include/mm_malloc.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/mm_malloc.h
rename to linux-x86/lib64/clang/12.0.1/include/mm_malloc.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/mmintrin.h b/linux-x86/lib64/clang/12.0.1/include/mmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/mmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/mmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/module.modulemap b/linux-x86/lib64/clang/12.0.1/include/module.modulemap
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/module.modulemap
rename to linux-x86/lib64/clang/12.0.1/include/module.modulemap
diff --git a/linux-x86/lib64/clang/11.0.5/include/movdirintrin.h b/linux-x86/lib64/clang/12.0.1/include/movdirintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/movdirintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/movdirintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/msa.h b/linux-x86/lib64/clang/12.0.1/include/msa.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/msa.h
rename to linux-x86/lib64/clang/12.0.1/include/msa.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/mwaitxintrin.h b/linux-x86/lib64/clang/12.0.1/include/mwaitxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/mwaitxintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/mwaitxintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/nmmintrin.h b/linux-x86/lib64/clang/12.0.1/include/nmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/nmmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/nmmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/omp-tools.h b/linux-x86/lib64/clang/12.0.1/include/omp-tools.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/omp-tools.h
rename to linux-x86/lib64/clang/12.0.1/include/omp-tools.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/omp.h b/linux-x86/lib64/clang/12.0.1/include/omp.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/omp.h
rename to linux-x86/lib64/clang/12.0.1/include/omp.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/ompt.h b/linux-x86/lib64/clang/12.0.1/include/ompt.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/ompt.h
rename to linux-x86/lib64/clang/12.0.1/include/ompt.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/opencl-c-base.h b/linux-x86/lib64/clang/12.0.1/include/opencl-c-base.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/opencl-c-base.h
rename to linux-x86/lib64/clang/12.0.1/include/opencl-c-base.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/opencl-c.h b/linux-x86/lib64/clang/12.0.1/include/opencl-c.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/opencl-c.h
rename to linux-x86/lib64/clang/12.0.1/include/opencl-c.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/__clang_openmp_device_functions.h b/linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/__clang_openmp_device_functions.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/__clang_openmp_device_functions.h
rename to linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/__clang_openmp_device_functions.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/cmath b/linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/cmath
similarity index 95%
copy from darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/cmath
copy to linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/cmath
index bd6011e..1aff66a 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/openmp_wrappers/cmath
+++ b/linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/cmath
@@ -24,8 +24,11 @@
 // which might live in cstdlib.
 #include <cstdlib>
 
+// We need limits because __clang_cuda_cmath.h below uses `std::numeric_limits`.
+#include <limits>
+
 #pragma omp begin declare variant match(                                       \
-    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})
+    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any, allow_templates)})
 
 #define __CUDA__
 #define __OPENMP_NVPTX__
diff --git a/linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex b/linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex
new file mode 100644
index 0000000..306ffe2
--- /dev/null
+++ b/linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex
@@ -0,0 +1,50 @@
+/*===-- complex --- OpenMP complex wrapper for target regions --------- c++ -===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_OPENMP_COMPLEX__
+#define __CLANG_OPENMP_COMPLEX__
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+// We require the math functions from <cmath> in the complex builtins below.
+#include <cmath>
+
+#define __CUDA__
+#include <__clang_cuda_complex_builtins.h>
+#endif
+
+// Grab the host header too.
+#include_next <complex>
+
+
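+// With this wrapper first on the include path, a construct such as
+//
+//   std::complex<double> z(3.0, 4.0);
+//   #pragma omp target map(tofrom : z)
+//   { z = z * z + std::complex<double>(1.0, 0.0); }
+//
+// can run inside an nvptx target region (a sketch; the exact map clause
+// depends on the surrounding code).
+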
+#ifdef __cplusplus
+
+// If we are compiling against libc++, the macro _LIBCPP_STD_VER should be set
+// after including <cmath> above. Since the complex header we use is a
+// simplified version of libc++'s, we don't need it in this case. If we
+// compile against libstdc++, or any other standard library, we will overload
+// the (hopefully template) functions in the <complex> header with the ones we
+// got from libc++, which decompose math functions like `std::sin` into
+// arithmetic and calls to non-complex functions, all of which we can then
+// handle.
+#ifndef _LIBCPP_STD_VER
+
+#pragma omp begin declare variant match(                                       \
+    device = {arch(nvptx, nvptx64)},                                           \
+    implementation = {extension(match_any, allow_templates)})
+
+#include <complex_cmath.h>
+
+#pragma omp end declare variant
+
+#endif
+
+#endif
diff --git a/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex.h b/linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/complex.h
rename to linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex.h
diff --git a/linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex_cmath.h b/linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex_cmath.h
new file mode 100644
index 0000000..e3d9aeb
--- /dev/null
+++ b/linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/complex_cmath.h
@@ -0,0 +1,388 @@
+//===------------------------- complex_cmath.h ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// std::complex header copied from the libcxx source and simplified for use in
+// OpenMP target offload regions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _OPENMP
+#error "This file is for OpenMP compilation only."
+#endif
+
+#ifndef __cplusplus
+#error "This file is for C++ compilation only."
+#endif
+
+#ifndef _LIBCPP_COMPLEX
+#define _LIBCPP_COMPLEX
+
+#include <cmath>
+#include <type_traits>
+
+#define __DEVICE__ static constexpr __attribute__((nothrow))
+
+namespace std {
+
+// abs
+
+template <class _Tp> __DEVICE__ _Tp abs(const std::complex<_Tp> &__c) {
+  return hypot(__c.real(), __c.imag());
+}
+
+// arg
+
+template <class _Tp> __DEVICE__ _Tp arg(const std::complex<_Tp> &__c) {
+  return atan2(__c.imag(), __c.real());
+}
+
+template <class _Tp>
+typename enable_if<is_integral<_Tp>::value || is_same<_Tp, double>::value,
+                   double>::type
+arg(_Tp __re) {
+  return atan2(0., __re);
+}
+
+template <class _Tp>
+typename enable_if<is_same<_Tp, float>::value, float>::type arg(_Tp __re) {
+  return atan2f(0.F, __re);
+}
+
+// norm
+
+template <class _Tp> __DEVICE__ _Tp norm(const std::complex<_Tp> &__c) {
+  if (std::isinf(__c.real()))
+    return abs(__c.real());
+  if (std::isinf(__c.imag()))
+    return abs(__c.imag());
+  return __c.real() * __c.real() + __c.imag() * __c.imag();
+}
+
+// conj
+
+template <class _Tp> std::complex<_Tp> conj(const std::complex<_Tp> &__c) {
+  return std::complex<_Tp>(__c.real(), -__c.imag());
+}
+
+// proj
+
+template <class _Tp> std::complex<_Tp> proj(const std::complex<_Tp> &__c) {
+  std::complex<_Tp> __r = __c;
+  if (std::isinf(__c.real()) || std::isinf(__c.imag()))
+    __r = std::complex<_Tp>(INFINITY, copysign(_Tp(0), __c.imag()));
+  return __r;
+}
+
+// polar
+
+template <class _Tp>
+complex<_Tp> polar(const _Tp &__rho, const _Tp &__theta = _Tp()) {
+  if (std::isnan(__rho) || signbit(__rho))
+    return std::complex<_Tp>(_Tp(NAN), _Tp(NAN));
+  if (std::isnan(__theta)) {
+    if (std::isinf(__rho))
+      return std::complex<_Tp>(__rho, __theta);
+    return std::complex<_Tp>(__theta, __theta);
+  }
+  if (std::isinf(__theta)) {
+    if (std::isinf(__rho))
+      return std::complex<_Tp>(__rho, _Tp(NAN));
+    return std::complex<_Tp>(_Tp(NAN), _Tp(NAN));
+  }
+  _Tp __x = __rho * cos(__theta);
+  if (std::isnan(__x))
+    __x = 0;
+  _Tp __y = __rho * sin(__theta);
+  if (std::isnan(__y))
+    __y = 0;
+  return std::complex<_Tp>(__x, __y);
+}
+
+// log
+
+template <class _Tp> std::complex<_Tp> log(const std::complex<_Tp> &__x) {
+  return std::complex<_Tp>(log(abs(__x)), arg(__x));
+}
+
+// log10
+
+template <class _Tp> std::complex<_Tp> log10(const std::complex<_Tp> &__x) {
+  return log(__x) / log(_Tp(10));
+}
+
+// sqrt
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> sqrt(const std::complex<_Tp> &__x) {
+  if (std::isinf(__x.imag()))
+    return std::complex<_Tp>(_Tp(INFINITY), __x.imag());
+  if (std::isinf(__x.real())) {
+    if (__x.real() > _Tp(0))
+      return std::complex<_Tp>(__x.real(), std::isnan(__x.imag())
+                                               ? __x.imag()
+                                               : copysign(_Tp(0), __x.imag()));
+    return std::complex<_Tp>(std::isnan(__x.imag()) ? __x.imag() : _Tp(0),
+                             copysign(__x.real(), __x.imag()));
+  }
+  return polar(sqrt(abs(__x)), arg(__x) / _Tp(2));
+}
+
+// exp
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> exp(const std::complex<_Tp> &__x) {
+  _Tp __i = __x.imag();
+  if (std::isinf(__x.real())) {
+    if (__x.real() < _Tp(0)) {
+      if (!std::isfinite(__i))
+        __i = _Tp(1);
+    } else if (__i == 0 || !std::isfinite(__i)) {
+      if (std::isinf(__i))
+        __i = _Tp(NAN);
+      return std::complex<_Tp>(__x.real(), __i);
+    }
+  } else if (std::isnan(__x.real()) && __x.imag() == 0)
+    return __x;
+  _Tp __e = exp(__x.real());
+  return std::complex<_Tp>(__e * cos(__i), __e * sin(__i));
+}
+
+// pow
+
+template <class _Tp>
+std::complex<_Tp> pow(const std::complex<_Tp> &__x,
+                      const std::complex<_Tp> &__y) {
+  return exp(__y * log(__x));
+}
+
+// __sqr, computes pow(x, 2)
+
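+// For __x = a + b*i, __x^2 = (a^2 - b^2) + 2ab*i. The real part below is
+// computed as (a - b) * (a + b), which is algebraically the same but loses
+// less precision to cancellation when |a| and |b| are close in magnitude.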
+template <class _Tp> std::complex<_Tp> __sqr(const std::complex<_Tp> &__x) {
+  return std::complex<_Tp>((__x.real() - __x.imag()) *
+                               (__x.real() + __x.imag()),
+                           _Tp(2) * __x.real() * __x.imag());
+}
+
+// asinh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> asinh(const std::complex<_Tp> &__x) {
+  const _Tp __pi(atan2(+0., -0.));
+  if (std::isinf(__x.real())) {
+    if (std::isnan(__x.imag()))
+      return __x;
+    if (std::isinf(__x.imag()))
+      return std::complex<_Tp>(__x.real(),
+                               copysign(__pi * _Tp(0.25), __x.imag()));
+    return std::complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag()));
+  }
+  if (std::isnan(__x.real())) {
+    if (std::isinf(__x.imag()))
+      return std::complex<_Tp>(__x.imag(), __x.real());
+    if (__x.imag() == 0)
+      return __x;
+    return std::complex<_Tp>(__x.real(), __x.real());
+  }
+  if (std::isinf(__x.imag()))
+    return std::complex<_Tp>(copysign(__x.imag(), __x.real()),
+                             copysign(__pi / _Tp(2), __x.imag()));
+  std::complex<_Tp> __z = log(__x + sqrt(__sqr(__x) + _Tp(1)));
+  return std::complex<_Tp>(copysign(__z.real(), __x.real()),
+                           copysign(__z.imag(), __x.imag()));
+}
+
+// acosh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> acosh(const std::complex<_Tp> &__x) {
+  const _Tp __pi(atan2(+0., -0.));
+  if (std::isinf(__x.real())) {
+    if (std::isnan(__x.imag()))
+      return std::complex<_Tp>(abs(__x.real()), __x.imag());
+    if (std::isinf(__x.imag())) {
+      if (__x.real() > 0)
+        return std::complex<_Tp>(__x.real(),
+                                 copysign(__pi * _Tp(0.25), __x.imag()));
+      else
+        return std::complex<_Tp>(-__x.real(),
+                                 copysign(__pi * _Tp(0.75), __x.imag()));
+    }
+    if (__x.real() < 0)
+      return std::complex<_Tp>(-__x.real(), copysign(__pi, __x.imag()));
+    return std::complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag()));
+  }
+  if (std::isnan(__x.real())) {
+    if (std::isinf(__x.imag()))
+      return std::complex<_Tp>(abs(__x.imag()), __x.real());
+    return std::complex<_Tp>(__x.real(), __x.real());
+  }
+  if (std::isinf(__x.imag()))
+    return std::complex<_Tp>(abs(__x.imag()),
+                             copysign(__pi / _Tp(2), __x.imag()));
+  std::complex<_Tp> __z = log(__x + sqrt(__sqr(__x) - _Tp(1)));
+  return std::complex<_Tp>(copysign(__z.real(), _Tp(0)),
+                           copysign(__z.imag(), __x.imag()));
+}
+
+// atanh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> atanh(const std::complex<_Tp> &__x) {
+  const _Tp __pi(atan2(+0., -0.));
+  if (std::isinf(__x.imag())) {
+    return std::complex<_Tp>(copysign(_Tp(0), __x.real()),
+                             copysign(__pi / _Tp(2), __x.imag()));
+  }
+  if (std::isnan(__x.imag())) {
+    if (std::isinf(__x.real()) || __x.real() == 0)
+      return std::complex<_Tp>(copysign(_Tp(0), __x.real()), __x.imag());
+    return std::complex<_Tp>(__x.imag(), __x.imag());
+  }
+  if (std::isnan(__x.real())) {
+    return std::complex<_Tp>(__x.real(), __x.real());
+  }
+  if (std::isinf(__x.real())) {
+    return std::complex<_Tp>(copysign(_Tp(0), __x.real()),
+                             copysign(__pi / _Tp(2), __x.imag()));
+  }
+  if (abs(__x.real()) == _Tp(1) && __x.imag() == _Tp(0)) {
+    return std::complex<_Tp>(copysign(_Tp(INFINITY), __x.real()),
+                             copysign(_Tp(0), __x.imag()));
+  }
+  std::complex<_Tp> __z = log((_Tp(1) + __x) / (_Tp(1) - __x)) / _Tp(2);
+  return std::complex<_Tp>(copysign(__z.real(), __x.real()),
+                           copysign(__z.imag(), __x.imag()));
+}
+
+// sinh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> sinh(const std::complex<_Tp> &__x) {
+  if (std::isinf(__x.real()) && !std::isfinite(__x.imag()))
+    return std::complex<_Tp>(__x.real(), _Tp(NAN));
+  if (__x.real() == 0 && !std::isfinite(__x.imag()))
+    return std::complex<_Tp>(__x.real(), _Tp(NAN));
+  if (__x.imag() == 0 && !std::isfinite(__x.real()))
+    return __x;
+  return std::complex<_Tp>(sinh(__x.real()) * cos(__x.imag()),
+                           cosh(__x.real()) * sin(__x.imag()));
+}
+
+// cosh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> cosh(const std::complex<_Tp> &__x) {
+  if (std::isinf(__x.real()) && !std::isfinite(__x.imag()))
+    return std::complex<_Tp>(abs(__x.real()), _Tp(NAN));
+  if (__x.real() == 0 && !std::isfinite(__x.imag()))
+    return std::complex<_Tp>(_Tp(NAN), __x.real());
+  if (__x.real() == 0 && __x.imag() == 0)
+    return std::complex<_Tp>(_Tp(1), __x.imag());
+  if (__x.imag() == 0 && !std::isfinite(__x.real()))
+    return std::complex<_Tp>(abs(__x.real()), __x.imag());
+  return std::complex<_Tp>(cosh(__x.real()) * cos(__x.imag()),
+                           sinh(__x.real()) * sin(__x.imag()));
+}
+
+// tanh
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> tanh(const std::complex<_Tp> &__x) {
+  if (std::isinf(__x.real())) {
+    if (!std::isfinite(__x.imag()))
+      return std::complex<_Tp>(_Tp(1), _Tp(0));
+    return std::complex<_Tp>(_Tp(1),
+                             copysign(_Tp(0), sin(_Tp(2) * __x.imag())));
+  }
+  if (std::isnan(__x.real()) && __x.imag() == 0)
+    return __x;
+  _Tp __2r(_Tp(2) * __x.real());
+  _Tp __2i(_Tp(2) * __x.imag());
+  _Tp __d(cosh(__2r) + cos(__2i));
+  _Tp __2rsh(sinh(__2r));
+  if (std::isinf(__2rsh) && std::isinf(__d))
+    return std::complex<_Tp>(__2rsh > _Tp(0) ? _Tp(1) : _Tp(-1),
+                             __2i > _Tp(0) ? _Tp(0) : _Tp(-0.));
+  return std::complex<_Tp>(__2rsh / __d, sin(__2i) / __d);
+}
+
+// asin
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> asin(const std::complex<_Tp> &__x) {
+  std::complex<_Tp> __z = asinh(complex<_Tp>(-__x.imag(), __x.real()));
+  return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+// acos
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> acos(const std::complex<_Tp> &__x) {
+  const _Tp __pi(atan2(+0., -0.));
+  if (std::isinf(__x.real())) {
+    if (std::isnan(__x.imag()))
+      return std::complex<_Tp>(__x.imag(), __x.real());
+    if (std::isinf(__x.imag())) {
+      if (__x.real() < _Tp(0))
+        return std::complex<_Tp>(_Tp(0.75) * __pi, -__x.imag());
+      return std::complex<_Tp>(_Tp(0.25) * __pi, -__x.imag());
+    }
+    if (__x.real() < _Tp(0))
+      return std::complex<_Tp>(__pi,
+                               signbit(__x.imag()) ? -__x.real() : __x.real());
+    return std::complex<_Tp>(_Tp(0),
+                             signbit(__x.imag()) ? __x.real() : -__x.real());
+  }
+  if (std::isnan(__x.real())) {
+    if (std::isinf(__x.imag()))
+      return std::complex<_Tp>(__x.real(), -__x.imag());
+    return std::complex<_Tp>(__x.real(), __x.real());
+  }
+  if (std::isinf(__x.imag()))
+    return std::complex<_Tp>(__pi / _Tp(2), -__x.imag());
+  if (__x.real() == 0 && (__x.imag() == 0 || isnan(__x.imag())))
+    return std::complex<_Tp>(__pi / _Tp(2), -__x.imag());
+  std::complex<_Tp> __z = log(__x + sqrt(__sqr(__x) - _Tp(1)));
+  if (signbit(__x.imag()))
+    return std::complex<_Tp>(abs(__z.imag()), abs(__z.real()));
+  return std::complex<_Tp>(abs(__z.imag()), -abs(__z.real()));
+}
+
+// atan
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> atan(const std::complex<_Tp> &__x) {
+  std::complex<_Tp> __z = atanh(complex<_Tp>(-__x.imag(), __x.real()));
+  return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+// sin
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> sin(const std::complex<_Tp> &__x) {
+  std::complex<_Tp> __z = sinh(complex<_Tp>(-__x.imag(), __x.real()));
+  return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+// cos
+
+template <class _Tp> std::complex<_Tp> cos(const std::complex<_Tp> &__x) {
+  return cosh(complex<_Tp>(-__x.imag(), __x.real()));
+}
+
+// tan
+
+template <class _Tp>
+__DEVICE__ std::complex<_Tp> tan(const std::complex<_Tp> &__x) {
+  std::complex<_Tp> __z = tanh(complex<_Tp>(-__x.imag(), __x.real()));
+  return std::complex<_Tp>(__z.imag(), -__z.real());
+}
+
+} // namespace std
+
+#endif
diff --git a/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/math.h b/linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/math.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/math.h
rename to linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/math.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/new b/linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/new
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/openmp_wrappers/new
rename to linux-x86/lib64/clang/12.0.1/include/openmp_wrappers/new
diff --git a/linux-x86/lib64/clang/11.0.5/include/pconfigintrin.h b/linux-x86/lib64/clang/12.0.1/include/pconfigintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/pconfigintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/pconfigintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/pkuintrin.h b/linux-x86/lib64/clang/12.0.1/include/pkuintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/pkuintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/pkuintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/pmmintrin.h b/linux-x86/lib64/clang/12.0.1/include/pmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/pmmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/pmmintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/popcntintrin.h b/linux-x86/lib64/clang/12.0.1/include/popcntintrin.h
similarity index 81%
copy from darwin-x86/lib64/clang/11.0.5/include/popcntintrin.h
copy to linux-x86/lib64/clang/12.0.1/include/popcntintrin.h
index 3129010..0aa94ae 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/popcntintrin.h
+++ b/linux-x86/lib64/clang/12.0.1/include/popcntintrin.h
@@ -13,6 +13,12 @@
 /* Define the default attributes for the functions in this file. */
 #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("popcnt")))
 
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS constexpr
+#else
+#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
+#endif
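+
+/* With C++11 or later the intrinsics below are also usable in constant
+ * expressions, e.g. (sketch):
+ *   static_assert(_mm_popcnt_u32(0xFFu) == 8, "eight set bits");
+ */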
+
 /// Counts the number of bits in the source operand having a value of 1.
 ///
 /// \headerfile <x86intrin.h>
@@ -23,7 +29,7 @@
 ///    An unsigned 32-bit integer operand.
 /// \returns A 32-bit integer containing the number of bits with value 1 in the
 ///    source operand.
-static __inline__ int __DEFAULT_FN_ATTRS
+static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
 _mm_popcnt_u32(unsigned int __A)
 {
   return __builtin_popcount(__A);
@@ -40,7 +46,7 @@
 ///    An unsigned 64-bit integer operand.
 /// \returns A 64-bit integer containing the number of bits with value 1 in the
 ///    source operand.
-static __inline__ long long __DEFAULT_FN_ATTRS
+static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
 _mm_popcnt_u64(unsigned long long __A)
 {
   return __builtin_popcountll(__A);
@@ -48,5 +54,6 @@
 #endif /* __x86_64__ */
 
 #undef __DEFAULT_FN_ATTRS
+#undef __DEFAULT_FN_ATTRS_CONSTEXPR
 
 #endif /* __POPCNTINTRIN_H */
diff --git a/linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/emmintrin.h b/linux-x86/lib64/clang/12.0.1/include/ppc_wrappers/emmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/emmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/ppc_wrappers/emmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/mm_malloc.h b/linux-x86/lib64/clang/12.0.1/include/ppc_wrappers/mm_malloc.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/mm_malloc.h
rename to linux-x86/lib64/clang/12.0.1/include/ppc_wrappers/mm_malloc.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/mmintrin.h b/linux-x86/lib64/clang/12.0.1/include/ppc_wrappers/mmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/mmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/ppc_wrappers/mmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/pmmintrin.h b/linux-x86/lib64/clang/12.0.1/include/ppc_wrappers/pmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/pmmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/ppc_wrappers/pmmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/smmintrin.h b/linux-x86/lib64/clang/12.0.1/include/ppc_wrappers/smmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/smmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/ppc_wrappers/smmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/tmmintrin.h b/linux-x86/lib64/clang/12.0.1/include/ppc_wrappers/tmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/tmmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/ppc_wrappers/tmmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/xmmintrin.h b/linux-x86/lib64/clang/12.0.1/include/ppc_wrappers/xmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/ppc_wrappers/xmmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/ppc_wrappers/xmmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/prfchwintrin.h b/linux-x86/lib64/clang/12.0.1/include/prfchwintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/prfchwintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/prfchwintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/profile/InstrProfData.inc b/linux-x86/lib64/clang/12.0.1/include/profile/InstrProfData.inc
similarity index 88%
copy from darwin-x86/lib64/clang/11.0.5/include/profile/InstrProfData.inc
copy to linux-x86/lib64/clang/12.0.1/include/profile/InstrProfData.inc
index a691352..6d0ffb1 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/profile/InstrProfData.inc
+++ b/linux-x86/lib64/clang/12.0.1/include/profile/InstrProfData.inc
@@ -157,6 +157,8 @@
 #ifndef VALUE_RANGE_PROF
 VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
 #else /* VALUE_RANGE_PROF */
+/* FIXME: This is to be removed after switching to the new memop value
+ * profiling. */
 VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx)) \
                       INSTR_PROF_COMMA
 VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeStart, Type::getInt64Ty(Ctx)) \
@@ -753,9 +755,14 @@
 #define INSTR_PROF_VALUE_PROF_FUNC __llvm_profile_instrument_target
 #define INSTR_PROF_VALUE_PROF_FUNC_STR \
         INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_FUNC)
+/* FIXME: This is to be removed after switching to the new memop value
+ * profiling. */
 #define INSTR_PROF_VALUE_RANGE_PROF_FUNC __llvm_profile_instrument_range
 #define INSTR_PROF_VALUE_RANGE_PROF_FUNC_STR \
         INSTR_PROF_QUOTE(INSTR_PROF_VALUE_RANGE_PROF_FUNC)
+#define INSTR_PROF_VALUE_PROF_MEMOP_FUNC __llvm_profile_instrument_memop
+#define INSTR_PROF_VALUE_PROF_MEMOP_FUNC_STR                                   \
+  INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_MEMOP_FUNC)
 
 /* InstrProfile per-function control data alignment.  */
 #define INSTR_PROF_DATA_ALIGNMENT 8
@@ -783,3 +790,121 @@
 #endif
 
 #undef COVMAP_V2_OR_V3
+
+#ifdef INSTR_PROF_VALUE_PROF_MEMOP_API
+
+#ifdef __cplusplus
+#define INSTR_PROF_INLINE inline
+#else
+#define INSTR_PROF_INLINE
+#endif
+
+/* The value range buckets (22 buckets) for the memop size value profiling look
+ * like:
+ *
+ *   [0, 0]
+ *   [1, 1]
+ *   [2, 2]
+ *   [3, 3]
+ *   [4, 4]
+ *   [5, 5]
+ *   [6, 6]
+ *   [7, 7]
+ *   [8, 8]
+ *   [9, 15]
+ *   [16, 16]
+ *   [17, 31]
+ *   [32, 32]
+ *   [33, 63]
+ *   [64, 64]
+ *   [65, 127]
+ *   [128, 128]
+ *   [129, 255]
+ *   [256, 256]
+ *   [257, 511]
+ *   [512, 512]
+ *   [513, UINT64_MAX]
+ *
+ * Each range has a 'representative value' which is the lower end value of the
+ * range and used to store in the runtime profile data records and the VP
+ * metadata. For example, it's 2 for [2, 2] and 64 for [65, 127].
+ */
+
+/*
+ * Clz and Popcount. This code was copied from
+ * compiler-rt/lib/fuzzer/{FuzzerBuiltins.h,FuzzerBuiltinsMsvc.h} and
+ * llvm/include/llvm/Support/MathExtras.h.
+ */
+#if defined(_MSC_VER) && !defined(__clang__)
+
+#include <intrin.h>
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+int InstProfClzll(unsigned long long X) {
+  unsigned long LeadZeroIdx = 0;
+#if !defined(_M_ARM64) && !defined(_M_X64)
+  // Scan the high 32 bits.
+  if (_BitScanReverse(&LeadZeroIdx, (unsigned long)(X >> 32)))
+    return (int)(63 - (LeadZeroIdx + 32)); // Create a bit offset from the MSB.
+  // Scan the low 32 bits.
+  if (_BitScanReverse(&LeadZeroIdx, (unsigned long)(X)))
+    return (int)(63 - LeadZeroIdx);
+#else
+  if (_BitScanReverse64(&LeadZeroIdx, X)) return 63 - LeadZeroIdx;
+#endif
+  return 64;
+}
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+int InstProfPopcountll(unsigned long long X) {
+  // This code originates from https://reviews.llvm.org/rG30626254510f.
+  unsigned long long v = X;
+  v = v - ((v >> 1) & 0x5555555555555555ULL);
+  v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL);
+  v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;
+  return (int)((unsigned long long)(v * 0x0101010101010101ULL) >> 56);
+}
+
+#else
+
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+int InstProfClzll(unsigned long long X) { return __builtin_clzll(X); }
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+int InstProfPopcountll(unsigned long long X) { return __builtin_popcountll(X); }
+
+#endif  /* defined(_MSC_VER) && !defined(__clang__) */
+
+/* Map an (observed) memop size value to the representative value of its range.
+ * For example, 5 -> 5, 22 -> 17, 99 -> 65, 256 -> 256, 1001 -> 513. */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE uint64_t
+InstrProfGetRangeRepValue(uint64_t Value) {
+  if (Value <= 8)
+    // The first ranges are individually tracked. Use the value as is.
+    return Value;
+  else if (Value >= 513)
+    // The last range is mapped to its lowest value.
+    return 513;
+  else if (InstProfPopcountll(Value) == 1)
+    // If it's a power of two, use it as is.
+    return Value;
+  else
+    // Otherwise, map to the previous power of two plus 1.
+    return (1 << (64 - InstProfClzll(Value) - 1)) + 1;
+}
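+
+/* Worked example: an observed size of 100 falls in the bucket [65, 127].
+ * InstProfPopcountll(100) == 3, so 100 is not a power of two, and
+ * InstProfClzll(100) == 57, giving (1 << (64 - 57 - 1)) + 1 == 65, the
+ * representative value of that bucket. */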
+
+/* Return true if the range that an (observed) memop size value belongs to has
+ * only a single value in the range.  For example, 0 -> true, 8 -> true, 10 ->
+ * false, 64 -> true, 100 -> false, 513 -> false. */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE unsigned
+InstrProfIsSingleValRange(uint64_t Value) {
+  if (Value <= 8)
+    // The first ranges are individually tracked.
+    return 1;
+  else if (InstProfPopcountll(Value) == 1)
+    // If it's a power of two, there's only one value.
+    return 1;
+  else
+    // Otherwise, there's more than one value in the range.
+    return 0;
+}
+
+#endif /* INSTR_PROF_VALUE_PROF_MEMOP_API */
diff --git a/linux-x86/lib64/clang/11.0.5/include/ptwriteintrin.h b/linux-x86/lib64/clang/12.0.1/include/ptwriteintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/ptwriteintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/ptwriteintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/rdseedintrin.h b/linux-x86/lib64/clang/12.0.1/include/rdseedintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/rdseedintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/rdseedintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/rtmintrin.h b/linux-x86/lib64/clang/12.0.1/include/rtmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/rtmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/rtmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/s390intrin.h b/linux-x86/lib64/clang/12.0.1/include/s390intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/s390intrin.h
rename to linux-x86/lib64/clang/12.0.1/include/s390intrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/sanitizer/allocator_interface.h b/linux-x86/lib64/clang/12.0.1/include/sanitizer/allocator_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/sanitizer/allocator_interface.h
rename to linux-x86/lib64/clang/12.0.1/include/sanitizer/allocator_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/asan_interface.h b/linux-x86/lib64/clang/12.0.1/include/sanitizer/asan_interface.h
similarity index 96%
copy from darwin-x86/lib64/clang/11.0.5/include/sanitizer/asan_interface.h
copy to linux-x86/lib64/clang/12.0.1/include/sanitizer/asan_interface.h
index 6af93aa..792ef9c 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/asan_interface.h
+++ b/linux-x86/lib64/clang/12.0.1/include/sanitizer/asan_interface.h
@@ -188,8 +188,8 @@
 /// \param addr Address to locate.
 /// \param name Buffer to store the variable's name.
 /// \param name_size Size in bytes of the variable's name buffer.
-/// \param region_address [out] Address of the region.
-/// \param region_size [out] Size of the region in bytes.
+/// \param[out] region_address Address of the region.
+/// \param[out] region_size Size of the region in bytes.
 ///
 /// \returns Returns the category of the given pointer as a constant string.
 const char *__asan_locate_address(void *addr, char *name, size_t name_size,
@@ -204,7 +204,7 @@
 /// \param addr A heap address.
 /// \param trace A buffer to store the stack trace.
 /// \param size Size in bytes of the trace buffer.
-/// \param thread_id [out] The thread ID of the address.
+/// \param[out] thread_id The thread ID of the address.
 ///
 /// \returns Returns the number of stored frames or 0 on error.
 size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
@@ -219,7 +219,7 @@
 /// \param addr A heap address.
 /// \param trace A buffer to store the stack trace.
 /// \param size Size in bytes of the trace buffer.
-/// \param thread_id [out] The thread ID of the address.
+/// \param[out] thread_id The thread ID of the address.
 ///
 /// \returns Returns the number of stored frames or 0 on error.
 size_t __asan_get_free_stack(void *addr, void **trace, size_t size,
@@ -228,8 +228,8 @@
 /// Gets the current shadow memory mapping (useful for calling from the
 /// debugger).
 ///
-/// \param shadow_scale [out] Shadow scale value.
-/// \param shadow_offset [out] Offset value.
+/// \param[out] shadow_scale Shadow scale value.
+/// \param[out] shadow_offset Offset value.
 void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset);
 
 /// This is an internal function that is called to report an error. However,
@@ -302,8 +302,8 @@
 ///
 /// \param fake_stack An opaque handler to a fake stack.
 /// \param addr Address to test.
-/// \param beg [out] Beginning of fake frame.
-/// \param end [out] End of fake frame.
+/// \param[out] beg Beginning of fake frame.
+/// \param[out] end End of fake frame.
 /// \returns Stack address or NULL.
 void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
                                    void **end);
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/common_interface_defs.h b/linux-x86/lib64/clang/12.0.1/include/sanitizer/common_interface_defs.h
similarity index 98%
copy from darwin-x86/lib64/clang/11.0.5/include/sanitizer/common_interface_defs.h
copy to linux-x86/lib64/clang/12.0.1/include/sanitizer/common_interface_defs.h
index f979c6a..b4f977b 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/common_interface_defs.h
+++ b/linux-x86/lib64/clang/12.0.1/include/sanitizer/common_interface_defs.h
@@ -320,7 +320,7 @@
 /// signal callback runs during the switch, it will not benefit from stack
 /// use-after-return detection.
 ///
-/// \param fake_stack_save [out] Fake stack save location.
+/// \param[out] fake_stack_save Fake stack save location.
 /// \param bottom Bottom address of stack.
 /// \param size Size of stack in bytes.
 void __sanitizer_start_switch_fiber(void **fake_stack_save,
@@ -335,8 +335,8 @@
 /// <c>__sanitizer_start_switch_fiber()</c>.
 ///
 /// \param fake_stack_save Fake stack save location.
-/// \param bottom_old [out] Bottom address of old stack.
-/// \param size_old [out] Size of old stack in bytes.
+/// \param[out] bottom_old Bottom address of old stack.
+/// \param[out] size_old Size of old stack in bytes.
 void __sanitizer_finish_switch_fiber(void *fake_stack_save,
                                      const void **bottom_old,
                                      size_t *size_old);
diff --git a/linux-x86/lib64/clang/11.0.5/include/sanitizer/coverage_interface.h b/linux-x86/lib64/clang/12.0.1/include/sanitizer/coverage_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/sanitizer/coverage_interface.h
rename to linux-x86/lib64/clang/12.0.1/include/sanitizer/coverage_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/dfsan_interface.h b/linux-x86/lib64/clang/12.0.1/include/sanitizer/dfsan_interface.h
similarity index 94%
copy from darwin-x86/lib64/clang/11.0.5/include/sanitizer/dfsan_interface.h
copy to linux-x86/lib64/clang/12.0.1/include/sanitizer/dfsan_interface.h
index 81546e5..18b2c81 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/dfsan_interface.h
+++ b/linux-x86/lib64/clang/12.0.1/include/sanitizer/dfsan_interface.h
@@ -80,9 +80,11 @@
 size_t dfsan_get_label_count(void);
 
 /// Flushes the DFSan shadow, i.e. forgets about all labels currently associated
-/// with the application memory. Will work only if there are no other
-/// threads executing DFSan-instrumented code concurrently.
-/// Use this call to start over the taint tracking within the same procces.
+/// with the application memory.  Use this call to start over the taint tracking
+/// within the same process.
+///
+/// Note: If another thread is working with tainted data during the flush, that
+/// taint could still be written to shadow after the flush.
 void dfsan_flush(void);
 
 /// Sets a callback to be invoked on calls to write().  The callback is invoked
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/hwasan_interface.h b/linux-x86/lib64/clang/12.0.1/include/sanitizer/hwasan_interface.h
similarity index 96%
copy from darwin-x86/lib64/clang/11.0.5/include/sanitizer/hwasan_interface.h
copy to linux-x86/lib64/clang/12.0.1/include/sanitizer/hwasan_interface.h
index 4c9ad13..14035c0 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/hwasan_interface.h
+++ b/linux-x86/lib64/clang/12.0.1/include/sanitizer/hwasan_interface.h
@@ -73,6 +73,9 @@
    * accessed through the pointer in x, or -1 if the whole range is good. */
   intptr_t __hwasan_test_shadow(const volatile void *x, size_t size);
 
+  /* Sets the callback function to be called during HWASan error reporting. */
+  void __hwasan_set_error_report_callback(void (*callback)(const char *));
+
   int __sanitizer_posix_memalign(void **memptr, size_t alignment, size_t size);
   void * __sanitizer_memalign(size_t alignment, size_t size);
   void * __sanitizer_aligned_alloc(size_t alignment, size_t size);
diff --git a/linux-x86/lib64/clang/11.0.5/include/sanitizer/linux_syscall_hooks.h b/linux-x86/lib64/clang/12.0.1/include/sanitizer/linux_syscall_hooks.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/sanitizer/linux_syscall_hooks.h
rename to linux-x86/lib64/clang/12.0.1/include/sanitizer/linux_syscall_hooks.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/sanitizer/lsan_interface.h b/linux-x86/lib64/clang/12.0.1/include/sanitizer/lsan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/sanitizer/lsan_interface.h
rename to linux-x86/lib64/clang/12.0.1/include/sanitizer/lsan_interface.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/msan_interface.h b/linux-x86/lib64/clang/12.0.1/include/sanitizer/msan_interface.h
similarity index 96%
copy from darwin-x86/lib64/clang/11.0.5/include/sanitizer/msan_interface.h
copy to linux-x86/lib64/clang/12.0.1/include/sanitizer/msan_interface.h
index d40c556..eeb39fb 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/msan_interface.h
+++ b/linux-x86/lib64/clang/12.0.1/include/sanitizer/msan_interface.h
@@ -114,6 +114,9 @@
      call to __msan_scoped_disable_interceptor_checks. */
   void __msan_scoped_enable_interceptor_checks(void);
 
+  void __msan_start_switch_fiber(const void *bottom, size_t size);
+  void __msan_finish_switch_fiber(const void **bottom_old, size_t *size_old);
+
 #ifdef __cplusplus
 }  // extern "C"
 #endif
diff --git a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/netbsd_syscall_hooks.h b/linux-x86/lib64/clang/12.0.1/include/sanitizer/netbsd_syscall_hooks.h
similarity index 95%
copy from darwin-x86/lib64/clang/11.0.5/include/sanitizer/netbsd_syscall_hooks.h
copy to linux-x86/lib64/clang/12.0.1/include/sanitizer/netbsd_syscall_hooks.h
index 370da0e..f661152 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/sanitizer/netbsd_syscall_hooks.h
+++ b/linux-x86/lib64/clang/12.0.1/include/sanitizer/netbsd_syscall_hooks.h
@@ -20,8 +20,8 @@
 // DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
 //
 // Generated with: generate_netbsd_syscalls.awk
-// Generated date: 2019-12-24
-// Generated from: syscalls.master,v 1.296 2019/09/22 22:59:39 christos Exp
+// Generated date: 2020-09-10
+// Generated from: syscalls.master,v 1.306 2020/08/14 00:53:16 riastradh Exp
 //
 //===----------------------------------------------------------------------===//
 #ifndef SANITIZER_NETBSD_SYSCALL_HOOKS_H
@@ -474,7 +474,12 @@
   __sanitizer_syscall_pre_impl_dup2((long long)(from), (long long)(to))
 #define __sanitizer_syscall_post_dup2(res, from, to)                           \
   __sanitizer_syscall_post_impl_dup2(res, (long long)(from), (long long)(to))
-/* syscall 91 has been skipped */
+#define __sanitizer_syscall_pre_getrandom(buf, buflen, flags)                  \
+  __sanitizer_syscall_pre_impl_getrandom(                                      \
+      (long long)(buf), (long long)(buflen), (long long)(flags))
+#define __sanitizer_syscall_post_getrandom(res, buf, buflen, flags)            \
+  __sanitizer_syscall_post_impl_getrandom(                                     \
+      res, (long long)(buf), (long long)(buflen), (long long)(flags))
 #define __sanitizer_syscall_pre_fcntl(fd, cmd, arg)                            \
   __sanitizer_syscall_pre_impl_fcntl((long long)(fd), (long long)(cmd),        \
                                      (long long)(arg))
@@ -849,9 +854,31 @@
 #define __sanitizer_syscall_post_sysarch(res, op, parms)                       \
   __sanitizer_syscall_post_impl_sysarch(res, (long long)(op),                  \
                                         (long long)(parms))
-/* syscall 166 has been skipped */
-/* syscall 167 has been skipped */
-/* syscall 168 has been skipped */
+#define __sanitizer_syscall_pre___futex(uaddr, op, val, timeout, uaddr2, val2, \
+                                        val3)                                  \
+  __sanitizer_syscall_pre_impl___futex((long long)(uaddr), (long long)(op),    \
+                                       (long long)(val), (long long)(timeout), \
+                                       (long long)(uaddr2), (long long)(val2), \
+                                       (long long)(val3))
+#define __sanitizer_syscall_post___futex(res, uaddr, op, val, timeout, uaddr2, \
+                                         val2, val3)                           \
+  __sanitizer_syscall_post_impl___futex(                                       \
+      res, (long long)(uaddr), (long long)(op), (long long)(val),              \
+      (long long)(timeout), (long long)(uaddr2), (long long)(val2),            \
+      (long long)(val3))
+#define __sanitizer_syscall_pre___futex_set_robust_list(head, len)             \
+  __sanitizer_syscall_pre_impl___futex_set_robust_list((long long)(head),      \
+                                                       (long long)(len))
+#define __sanitizer_syscall_post___futex_set_robust_list(res, head, len)       \
+  __sanitizer_syscall_post_impl___futex_set_robust_list(                       \
+      res, (long long)(head), (long long)(len))
+#define __sanitizer_syscall_pre___futex_get_robust_list(lwpid, headp, lenp)    \
+  __sanitizer_syscall_pre_impl___futex_get_robust_list(                        \
+      (long long)(lwpid), (long long)(headp), (long long)(lenp))
+#define __sanitizer_syscall_post___futex_get_robust_list(res, lwpid, headp,    \
+                                                         lenp)                 \
+  __sanitizer_syscall_post_impl___futex_get_robust_list(                       \
+      res, (long long)(lwpid), (long long)(headp), (long long)(lenp))
 #if !defined(_LP64)
 #define __sanitizer_syscall_pre_compat_10_osemsys(which, a2, a3, a4, a5)       \
   __sanitizer_syscall_pre_impl_compat_10_osemsys(                              \
@@ -2731,6 +2758,83 @@
   __sanitizer_syscall_post_impl___fhstatvfs190(                                \
       res, (long long)(fhp), (long long)(fh_size), (long long)(buf),           \
       (long long)(flags))
+#define __sanitizer_syscall_pre___acl_get_link(path, type, aclp)               \
+  __sanitizer_syscall_pre_impl___acl_get_link(                                 \
+      (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_get_link(res, path, type, aclp)         \
+  __sanitizer_syscall_post_impl___acl_get_link(                                \
+      res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_set_link(path, type, aclp)               \
+  __sanitizer_syscall_pre_impl___acl_set_link(                                 \
+      (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_set_link(res, path, type, aclp)         \
+  __sanitizer_syscall_post_impl___acl_set_link(                                \
+      res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_delete_link(path, type)                  \
+  __sanitizer_syscall_pre_impl___acl_delete_link((long long)(path),            \
+                                                 (long long)(type))
+#define __sanitizer_syscall_post___acl_delete_link(res, path, type)            \
+  __sanitizer_syscall_post_impl___acl_delete_link(res, (long long)(path),      \
+                                                  (long long)(type))
+#define __sanitizer_syscall_pre___acl_aclcheck_link(path, type, aclp)          \
+  __sanitizer_syscall_pre_impl___acl_aclcheck_link(                            \
+      (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_aclcheck_link(res, path, type, aclp)    \
+  __sanitizer_syscall_post_impl___acl_aclcheck_link(                           \
+      res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_get_file(path, type, aclp)               \
+  __sanitizer_syscall_pre_impl___acl_get_file(                                 \
+      (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_get_file(res, path, type, aclp)         \
+  __sanitizer_syscall_post_impl___acl_get_file(                                \
+      res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_set_file(path, type, aclp)               \
+  __sanitizer_syscall_pre_impl___acl_set_file(                                 \
+      (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_set_file(res, path, type, aclp)         \
+  __sanitizer_syscall_post_impl___acl_set_file(                                \
+      res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_get_fd(filedes, type, aclp)              \
+  __sanitizer_syscall_pre_impl___acl_get_fd(                                   \
+      (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_get_fd(res, filedes, type, aclp)        \
+  __sanitizer_syscall_post_impl___acl_get_fd(                                  \
+      res, (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_set_fd(filedes, type, aclp)              \
+  __sanitizer_syscall_pre_impl___acl_set_fd(                                   \
+      (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_set_fd(res, filedes, type, aclp)        \
+  __sanitizer_syscall_post_impl___acl_set_fd(                                  \
+      res, (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_delete_file(path, type)                  \
+  __sanitizer_syscall_pre_impl___acl_delete_file((long long)(path),            \
+                                                 (long long)(type))
+#define __sanitizer_syscall_post___acl_delete_file(res, path, type)            \
+  __sanitizer_syscall_post_impl___acl_delete_file(res, (long long)(path),      \
+                                                  (long long)(type))
+#define __sanitizer_syscall_pre___acl_delete_fd(filedes, type)                 \
+  __sanitizer_syscall_pre_impl___acl_delete_fd((long long)(filedes),           \
+                                               (long long)(type))
+#define __sanitizer_syscall_post___acl_delete_fd(res, filedes, type)           \
+  __sanitizer_syscall_post_impl___acl_delete_fd(res, (long long)(filedes),     \
+                                                (long long)(type))
+#define __sanitizer_syscall_pre___acl_aclcheck_file(path, type, aclp)          \
+  __sanitizer_syscall_pre_impl___acl_aclcheck_file(                            \
+      (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_aclcheck_file(res, path, type, aclp)    \
+  __sanitizer_syscall_post_impl___acl_aclcheck_file(                           \
+      res, (long long)(path), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre___acl_aclcheck_fd(filedes, type, aclp)         \
+  __sanitizer_syscall_pre_impl___acl_aclcheck_fd(                              \
+      (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_post___acl_aclcheck_fd(res, filedes, type, aclp)   \
+  __sanitizer_syscall_post_impl___acl_aclcheck_fd(                             \
+      res, (long long)(filedes), (long long)(type), (long long)(aclp))
+#define __sanitizer_syscall_pre_lpathconf(path, name)                          \
+  __sanitizer_syscall_pre_impl_lpathconf((long long)(path), (long long)(name))
+#define __sanitizer_syscall_post_lpathconf(res, path, name)                    \
+  __sanitizer_syscall_post_impl_lpathconf(res, (long long)(path),              \
+                                          (long long)(name))
 
 /* Compat with older releases */
 #define __sanitizer_syscall_pre_getvfsstat                                     \
@@ -3088,7 +3192,10 @@
 void __sanitizer_syscall_pre_impl_dup2(long long from, long long to);
 void __sanitizer_syscall_post_impl_dup2(long long res, long long from,
                                         long long to);
-/* syscall 91 has been skipped */
+void __sanitizer_syscall_pre_impl_getrandom(long long buf, long long buflen,
+                                            long long flags);
+void __sanitizer_syscall_post_impl_getrandom(long long res, long long buf,
+                                             long long buflen, long long flags);
 void __sanitizer_syscall_pre_impl_fcntl(long long fd, long long cmd,
                                         long long arg);
 void __sanitizer_syscall_post_impl_fcntl(long long res, long long fd,
@@ -3380,9 +3487,26 @@
 void __sanitizer_syscall_pre_impl_sysarch(long long op, long long parms);
 void __sanitizer_syscall_post_impl_sysarch(long long res, long long op,
                                            long long parms);
-/* syscall 166 has been skipped */
-/* syscall 167 has been skipped */
-/* syscall 168 has been skipped */
+void __sanitizer_syscall_pre_impl___futex(long long uaddr, long long op,
+                                          long long val, long long timeout,
+                                          long long uaddr2, long long val2,
+                                          long long val3);
+void __sanitizer_syscall_post_impl___futex(long long res, long long uaddr,
+                                           long long op, long long val,
+                                           long long timeout, long long uaddr2,
+                                           long long val2, long long val3);
+void __sanitizer_syscall_pre_impl___futex_set_robust_list(long long head,
+                                                          long long len);
+void __sanitizer_syscall_post_impl___futex_set_robust_list(long long res,
+                                                           long long head,
+                                                           long long len);
+void __sanitizer_syscall_pre_impl___futex_get_robust_list(long long lwpid,
+                                                          long long headp,
+                                                          long long lenp);
+void __sanitizer_syscall_post_impl___futex_get_robust_list(long long res,
+                                                           long long lwpid,
+                                                           long long headp,
+                                                           long long lenp);
 #if !defined(_LP64)
 void __sanitizer_syscall_pre_impl_compat_10_osemsys(long long which,
                                                     long long a2, long long a3,
@@ -4802,6 +4926,75 @@
                                                   long long fh_size,
                                                   long long buf,
                                                   long long flags);
+void __sanitizer_syscall_pre_impl___acl_get_link(long long path, long long type,
+                                                 long long aclp);
+void __sanitizer_syscall_post_impl___acl_get_link(long long res, long long path,
+                                                  long long type,
+                                                  long long aclp);
+void __sanitizer_syscall_pre_impl___acl_set_link(long long path, long long type,
+                                                 long long aclp);
+void __sanitizer_syscall_post_impl___acl_set_link(long long res, long long path,
+                                                  long long type,
+                                                  long long aclp);
+void __sanitizer_syscall_pre_impl___acl_delete_link(long long path,
+                                                    long long type);
+void __sanitizer_syscall_post_impl___acl_delete_link(long long res,
+                                                     long long path,
+                                                     long long type);
+void __sanitizer_syscall_pre_impl___acl_aclcheck_link(long long path,
+                                                      long long type,
+                                                      long long aclp);
+void __sanitizer_syscall_post_impl___acl_aclcheck_link(long long res,
+                                                       long long path,
+                                                       long long type,
+                                                       long long aclp);
+void __sanitizer_syscall_pre_impl___acl_get_file(long long path, long long type,
+                                                 long long aclp);
+void __sanitizer_syscall_post_impl___acl_get_file(long long res, long long path,
+                                                  long long type,
+                                                  long long aclp);
+void __sanitizer_syscall_pre_impl___acl_set_file(long long path, long long type,
+                                                 long long aclp);
+void __sanitizer_syscall_post_impl___acl_set_file(long long res, long long path,
+                                                  long long type,
+                                                  long long aclp);
+void __sanitizer_syscall_pre_impl___acl_get_fd(long long filedes,
+                                               long long type, long long aclp);
+void __sanitizer_syscall_post_impl___acl_get_fd(long long res,
+                                                long long filedes,
+                                                long long type, long long aclp);
+void __sanitizer_syscall_pre_impl___acl_set_fd(long long filedes,
+                                               long long type, long long aclp);
+void __sanitizer_syscall_post_impl___acl_set_fd(long long res,
+                                                long long filedes,
+                                                long long type, long long aclp);
+void __sanitizer_syscall_pre_impl___acl_delete_file(long long path,
+                                                    long long type);
+void __sanitizer_syscall_post_impl___acl_delete_file(long long res,
+                                                     long long path,
+                                                     long long type);
+void __sanitizer_syscall_pre_impl___acl_delete_fd(long long filedes,
+                                                  long long type);
+void __sanitizer_syscall_post_impl___acl_delete_fd(long long res,
+                                                   long long filedes,
+                                                   long long type);
+void __sanitizer_syscall_pre_impl___acl_aclcheck_file(long long path,
+                                                      long long type,
+                                                      long long aclp);
+void __sanitizer_syscall_post_impl___acl_aclcheck_file(long long res,
+                                                       long long path,
+                                                       long long type,
+                                                       long long aclp);
+void __sanitizer_syscall_pre_impl___acl_aclcheck_fd(long long filedes,
+                                                    long long type,
+                                                    long long aclp);
+void __sanitizer_syscall_post_impl___acl_aclcheck_fd(long long res,
+                                                     long long filedes,
+                                                     long long type,
+                                                     long long aclp);
+void __sanitizer_syscall_pre_impl_lpathconf(long long path, long long name);
+void __sanitizer_syscall_post_impl_lpathconf(long long res, long long path,
+                                             long long name);
 
 #ifdef __cplusplus
 } // extern "C"
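The hunks above regenerate the NetBSD syscall hooks from a newer syscalls.master: slots that were previously skipped (91 and 166-168) now carry getrandom and the __futex family, and the __acl_* / lpathconf hooks are appended at the end. A minimal sketch of how an interceptor would use one of the new pre/post pairs, assuming a raw syscall(2) invocation and the SYS_getrandom constant (both illustrative assumptions, not part of the generated header):

#include <sanitizer/netbsd_syscall_hooks.h>
#include <sys/syscall.h> /* SYS_getrandom: assumed here for illustration */
#include <unistd.h>

/* Bracket the raw syscall with the hooks defined above so the sanitizer
 * can validate the buffer before the call and update shadow state after. */
long traced_getrandom(void *buf, size_t buflen, unsigned int flags) {
  __sanitizer_syscall_pre_getrandom(buf, buflen, flags);
  long res = syscall(SYS_getrandom, buf, buflen, flags);
  __sanitizer_syscall_post_getrandom(res, buf, buflen, flags);
  return res;
}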
diff --git a/linux-x86/lib64/clang/11.0.5/include/sanitizer/scudo_interface.h b/linux-x86/lib64/clang/12.0.1/include/sanitizer/scudo_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/sanitizer/scudo_interface.h
rename to linux-x86/lib64/clang/12.0.1/include/sanitizer/scudo_interface.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface.h b/linux-x86/lib64/clang/12.0.1/include/sanitizer/tsan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface.h
rename to linux-x86/lib64/clang/12.0.1/include/sanitizer/tsan_interface.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface_atomic.h b/linux-x86/lib64/clang/12.0.1/include/sanitizer/tsan_interface_atomic.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/sanitizer/tsan_interface_atomic.h
rename to linux-x86/lib64/clang/12.0.1/include/sanitizer/tsan_interface_atomic.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/sanitizer/ubsan_interface.h b/linux-x86/lib64/clang/12.0.1/include/sanitizer/ubsan_interface.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/sanitizer/ubsan_interface.h
rename to linux-x86/lib64/clang/12.0.1/include/sanitizer/ubsan_interface.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/serializeintrin.h b/linux-x86/lib64/clang/12.0.1/include/serializeintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/serializeintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/serializeintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/sgxintrin.h b/linux-x86/lib64/clang/12.0.1/include/sgxintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/sgxintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/sgxintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/shaintrin.h b/linux-x86/lib64/clang/12.0.1/include/shaintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/shaintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/shaintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/smmintrin.h b/linux-x86/lib64/clang/12.0.1/include/smmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/smmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/smmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/stdalign.h b/linux-x86/lib64/clang/12.0.1/include/stdalign.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/stdalign.h
rename to linux-x86/lib64/clang/12.0.1/include/stdalign.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/stdarg.h b/linux-x86/lib64/clang/12.0.1/include/stdarg.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/stdarg.h
rename to linux-x86/lib64/clang/12.0.1/include/stdarg.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/stdatomic.h b/linux-x86/lib64/clang/12.0.1/include/stdatomic.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/stdatomic.h
rename to linux-x86/lib64/clang/12.0.1/include/stdatomic.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/stdbool.h b/linux-x86/lib64/clang/12.0.1/include/stdbool.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/stdbool.h
rename to linux-x86/lib64/clang/12.0.1/include/stdbool.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/stddef.h b/linux-x86/lib64/clang/12.0.1/include/stddef.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/stddef.h
rename to linux-x86/lib64/clang/12.0.1/include/stddef.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/stdint.h b/linux-x86/lib64/clang/12.0.1/include/stdint.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/stdint.h
rename to linux-x86/lib64/clang/12.0.1/include/stdint.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/stdnoreturn.h b/linux-x86/lib64/clang/12.0.1/include/stdnoreturn.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/stdnoreturn.h
rename to linux-x86/lib64/clang/12.0.1/include/stdnoreturn.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/tbmintrin.h b/linux-x86/lib64/clang/12.0.1/include/tbmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/tbmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/tbmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/tgmath.h b/linux-x86/lib64/clang/12.0.1/include/tgmath.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/tgmath.h
rename to linux-x86/lib64/clang/12.0.1/include/tgmath.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/tmmintrin.h b/linux-x86/lib64/clang/12.0.1/include/tmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/tmmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/tmmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/tsxldtrkintrin.h b/linux-x86/lib64/clang/12.0.1/include/tsxldtrkintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/tsxldtrkintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/tsxldtrkintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/unwind.h b/linux-x86/lib64/clang/12.0.1/include/unwind.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/unwind.h
rename to linux-x86/lib64/clang/12.0.1/include/unwind.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/vadefs.h b/linux-x86/lib64/clang/12.0.1/include/vadefs.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/vadefs.h
rename to linux-x86/lib64/clang/12.0.1/include/vadefs.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/vaesintrin.h b/linux-x86/lib64/clang/12.0.1/include/vaesintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/vaesintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/vaesintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/varargs.h b/linux-x86/lib64/clang/12.0.1/include/varargs.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/varargs.h
rename to linux-x86/lib64/clang/12.0.1/include/varargs.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/vecintrin.h b/linux-x86/lib64/clang/12.0.1/include/vecintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/vecintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/vecintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/vpclmulqdqintrin.h b/linux-x86/lib64/clang/12.0.1/include/vpclmulqdqintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/vpclmulqdqintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/vpclmulqdqintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/waitpkgintrin.h b/linux-x86/lib64/clang/12.0.1/include/waitpkgintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/waitpkgintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/waitpkgintrin.h
diff --git a/darwin-x86/lib64/clang/11.0.5/include/wasm_simd128.h b/linux-x86/lib64/clang/12.0.1/include/wasm_simd128.h
similarity index 91%
copy from darwin-x86/lib64/clang/11.0.5/include/wasm_simd128.h
copy to linux-x86/lib64/clang/12.0.1/include/wasm_simd128.h
index b781238..ac88516 100644
--- a/darwin-x86/lib64/clang/11.0.5/include/wasm_simd128.h
+++ b/linux-x86/lib64/clang/12.0.1/include/wasm_simd128.h
@@ -18,8 +18,7 @@
 
 // Internal types determined by clang builtin definitions
 typedef int32_t __v128_u __attribute__((__vector_size__(16), __aligned__(1)));
-typedef char __i8x16 __attribute__((__vector_size__(16), __aligned__(16)));
-typedef signed char __s8x16
+typedef signed char __i8x16
     __attribute__((__vector_size__(16), __aligned__(16)));
 typedef unsigned char __u8x16
     __attribute__((__vector_size__(16), __aligned__(16)));
@@ -35,6 +34,13 @@
 typedef float __f32x4 __attribute__((__vector_size__(16), __aligned__(16)));
 typedef double __f64x2 __attribute__((__vector_size__(16), __aligned__(16)));
 
+typedef signed char __i8x8 __attribute__((__vector_size__(8), __aligned__(8)));
+typedef unsigned char __u8x8
+    __attribute__((__vector_size__(8), __aligned__(8)));
+typedef short __i16x4 __attribute__((__vector_size__(8), __aligned__(8)));
+typedef unsigned short __u16x4
+    __attribute__((__vector_size__(8), __aligned__(8)));
+
 #define __DEFAULT_FN_ATTRS                                                     \
   __attribute__((__always_inline__, __nodebug__, __target__("simd128"),        \
                  __min_vector_width__(128)))
@@ -273,7 +279,7 @@
   (__builtin_wasm_extract_lane_s_i8x16((__i8x16)(__a), __i))
 
 #define wasm_u8x16_extract_lane(__a, __i)                                      \
-  (__builtin_wasm_extract_lane_u_i8x16((__i8x16)(__a), __i))
+  (__builtin_wasm_extract_lane_u_i8x16((__u8x16)(__a), __i))
 
 #define wasm_i8x16_replace_lane(__a, __i, __b)                                 \
   ((v128_t)__builtin_wasm_replace_lane_i8x16((__i8x16)(__a), __i, __b))
@@ -286,7 +292,7 @@
   (__builtin_wasm_extract_lane_s_i16x8((__i16x8)(__a), __i))
 
 #define wasm_u16x8_extract_lane(__a, __i)                                      \
-  (__builtin_wasm_extract_lane_u_i16x8((__i16x8)(__a), __i))
+  (__builtin_wasm_extract_lane_u_i16x8((__u16x8)(__a), __i))
 
 #define wasm_i16x8_replace_lane(__a, __i, __b)                                 \
   ((v128_t)__builtin_wasm_replace_lane_i16x8((__i16x8)(__a), __i, __b))
@@ -333,17 +339,17 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_eq(v128_t __a,
                                                           v128_t __b) {
-  return (v128_t)((__s8x16)__a == (__s8x16)__b);
+  return (v128_t)((__i8x16)__a == (__i8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ne(v128_t __a,
                                                           v128_t __b) {
-  return (v128_t)((__s8x16)__a != (__s8x16)__b);
+  return (v128_t)((__i8x16)__a != (__i8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_lt(v128_t __a,
                                                           v128_t __b) {
-  return (v128_t)((__s8x16)__a < (__s8x16)__b);
+  return (v128_t)((__i8x16)__a < (__i8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_lt(v128_t __a,
@@ -353,7 +359,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_gt(v128_t __a,
                                                           v128_t __b) {
-  return (v128_t)((__s8x16)__a > (__s8x16)__b);
+  return (v128_t)((__i8x16)__a > (__i8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_gt(v128_t __a,
@@ -363,7 +369,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_le(v128_t __a,
                                                           v128_t __b) {
-  return (v128_t)((__s8x16)__a <= (__s8x16)__b);
+  return (v128_t)((__i8x16)__a <= (__i8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_le(v128_t __a,
@@ -373,7 +379,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_ge(v128_t __a,
                                                           v128_t __b) {
-  return (v128_t)((__s8x16)__a >= (__s8x16)__b);
+  return (v128_t)((__i8x16)__a >= (__i8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_ge(v128_t __a,
@@ -595,7 +601,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shr(v128_t __a,
                                                            int32_t __b) {
-  return (v128_t)((__s8x16)__a >> __b);
+  return (v128_t)((__i8x16)__a >> __b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_shr(v128_t __a,
@@ -616,8 +622,8 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_u8x16_add_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_add_saturate_u_i8x16((__i8x16)__a,
-                                                     (__i8x16)__b);
+  return (v128_t)__builtin_wasm_add_saturate_u_i8x16((__u8x16)__a,
+                                                     (__u8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a,
@@ -633,8 +639,8 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_sub_saturate_u_i8x16((__i8x16)__a,
-                                                     (__i8x16)__b);
+  return (v128_t)__builtin_wasm_sub_saturate_u_i8x16((__u8x16)__a,
+                                                     (__u8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_min(v128_t __a,
@@ -644,7 +650,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_min(v128_t __a,
                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_min_u_i8x16((__i8x16)__a, (__i8x16)__b);
+  return (v128_t)__builtin_wasm_min_u_i8x16((__u8x16)__a, (__u8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_max(v128_t __a,
@@ -654,12 +660,12 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_max(v128_t __a,
                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_max_u_i8x16((__i8x16)__a, (__i8x16)__b);
+  return (v128_t)__builtin_wasm_max_u_i8x16((__u8x16)__a, (__u8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_avgr(v128_t __a,
                                                             v128_t __b) {
-  return (v128_t)__builtin_wasm_avgr_u_i8x16((__i8x16)__a, (__i8x16)__b);
+  return (v128_t)__builtin_wasm_avgr_u_i8x16((__u8x16)__a, (__u8x16)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_abs(v128_t __a) {
@@ -706,8 +712,8 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_u16x8_add_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_add_saturate_u_i16x8((__i16x8)__a,
-                                                     (__i16x8)__b);
+  return (v128_t)__builtin_wasm_add_saturate_u_i16x8((__u16x8)__a,
+                                                     (__u16x8)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a,
@@ -723,8 +729,8 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_sub_saturate_u_i16x8((__i16x8)__a,
-                                                     (__i16x8)__b);
+  return (v128_t)__builtin_wasm_sub_saturate_u_i16x8((__u16x8)__a,
+                                                     (__u16x8)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_mul(v128_t __a,
@@ -739,7 +745,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_min(v128_t __a,
                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_min_u_i16x8((__i16x8)__a, (__i16x8)__b);
+  return (v128_t)__builtin_wasm_min_u_i16x8((__u16x8)__a, (__u16x8)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_max(v128_t __a,
@@ -749,12 +755,12 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_max(v128_t __a,
                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_max_u_i16x8((__i16x8)__a, (__i16x8)__b);
+  return (v128_t)__builtin_wasm_max_u_i16x8((__u16x8)__a, (__u16x8)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_avgr(v128_t __a,
                                                             v128_t __b) {
-  return (v128_t)__builtin_wasm_avgr_u_i16x8((__i16x8)__a, (__i16x8)__b);
+  return (v128_t)__builtin_wasm_avgr_u_i16x8((__u16x8)__a, (__u16x8)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_abs(v128_t __a) {
@@ -810,7 +816,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_min(v128_t __a,
                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_min_u_i32x4((__i32x4)__a, (__i32x4)__b);
+  return (v128_t)__builtin_wasm_min_u_i32x4((__u32x4)__a, (__u32x4)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_max(v128_t __a,
@@ -820,7 +826,7 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_max(v128_t __a,
                                                            v128_t __b) {
-  return (v128_t)__builtin_wasm_max_u_i32x4((__i32x4)__a, (__i32x4)__b);
+  return (v128_t)__builtin_wasm_max_u_i32x4((__u32x4)__a, (__u32x4)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_neg(v128_t __a) {
@@ -1071,8 +1077,8 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_u8x16_narrow_i16x8(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__i16x8)__a,
-                                                     (__i16x8)__b);
+  return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__u16x8)__a,
+                                                     (__u16x8)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
@@ -1083,48 +1089,76 @@
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_u16x8_narrow_i32x4(v128_t __a, v128_t __b) {
-  return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__i32x4)__a,
-                                                     (__i32x4)__b);
+  return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__u32x4)__a,
+                                                     (__u32x4)__b);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i16x8_widen_low_i8x16(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_low_s_i16x8_i8x16((__i8x16)__a);
+  return (v128_t) __builtin_convertvector(
+      (__i8x8){((__i8x16)__a)[0], ((__i8x16)__a)[1], ((__i8x16)__a)[2],
+               ((__i8x16)__a)[3], ((__i8x16)__a)[4], ((__i8x16)__a)[5],
+               ((__i8x16)__a)[6], ((__i8x16)__a)[7]},
+      __i16x8);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i16x8_widen_high_i8x16(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_high_s_i16x8_i8x16((__i8x16)__a);
+  return (v128_t) __builtin_convertvector(
+      (__i8x8){((__i8x16)__a)[8], ((__i8x16)__a)[9], ((__i8x16)__a)[10],
+               ((__i8x16)__a)[11], ((__i8x16)__a)[12], ((__i8x16)__a)[13],
+               ((__i8x16)__a)[14], ((__i8x16)__a)[15]},
+      __i16x8);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i16x8_widen_low_u8x16(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_low_u_i16x8_i8x16((__i8x16)__a);
+  return (v128_t) __builtin_convertvector(
+      (__u8x8){((__u8x16)__a)[0], ((__u8x16)__a)[1], ((__u8x16)__a)[2],
+               ((__u8x16)__a)[3], ((__u8x16)__a)[4], ((__u8x16)__a)[5],
+               ((__u8x16)__a)[6], ((__u8x16)__a)[7]},
+      __u16x8);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i16x8_widen_high_u8x16(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_high_u_i16x8_i8x16((__i8x16)__a);
+  return (v128_t) __builtin_convertvector(
+      (__u8x8){((__u8x16)__a)[8], ((__u8x16)__a)[9], ((__u8x16)__a)[10],
+               ((__u8x16)__a)[11], ((__u8x16)__a)[12], ((__u8x16)__a)[13],
+               ((__u8x16)__a)[14], ((__u8x16)__a)[15]},
+      __u16x8);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i32x4_widen_low_i16x8(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_low_s_i32x4_i16x8((__i16x8)__a);
+  return (v128_t) __builtin_convertvector(
+      (__i16x4){((__i16x8)__a)[0], ((__i16x8)__a)[1], ((__i16x8)__a)[2],
+                ((__i16x8)__a)[3]},
+      __i32x4);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i32x4_widen_high_i16x8(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_high_s_i32x4_i16x8((__i16x8)__a);
+  return (v128_t) __builtin_convertvector(
+      (__i16x4){((__i16x8)__a)[4], ((__i16x8)__a)[5], ((__i16x8)__a)[6],
+                ((__i16x8)__a)[7]},
+      __i32x4);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i32x4_widen_low_u16x8(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_low_u_i32x4_i16x8((__i16x8)__a);
+  return (v128_t) __builtin_convertvector(
+      (__u16x4){((__u16x8)__a)[0], ((__u16x8)__a)[1], ((__u16x8)__a)[2],
+                ((__u16x8)__a)[3]},
+      __u32x4);
 }
 
 static __inline__ v128_t __DEFAULT_FN_ATTRS
 wasm_i32x4_widen_high_u16x8(v128_t __a) {
-  return (v128_t)__builtin_wasm_widen_high_u_i32x4_i16x8((__i16x8)__a);
+  return (v128_t) __builtin_convertvector(
+      (__u16x4){((__u16x8)__a)[4], ((__u16x8)__a)[5], ((__u16x8)__a)[6],
+                ((__u16x8)__a)[7]},
+      __u32x4);
 }
 
 // Undefine helper macros
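Two things change in wasm_simd128.h above: the lane typedefs gain properly signed/unsigned variants (so the _u_ builtins are fed __u8x16 / __u16x8 instead of signed vectors), and the widen operations drop the target-specific builtins in favor of clang's generic __builtin_convertvector applied to explicit half-width vectors (__i8x8, __i16x4, ...). A sketch of the same widening pattern outside the header, using stand-in typedefs that mirror the ones added above:

typedef signed char i8x16 __attribute__((__vector_size__(16)));
typedef signed char i8x8 __attribute__((__vector_size__(8)));
typedef short i16x8 __attribute__((__vector_size__(16)));

/* Sign-extend the low eight i8 lanes to i16. __builtin_convertvector
 * converts element-wise between vectors of equal lane count, and the
 * signedness of the source type selects sign- vs. zero-extension (an
 * unsigned source such as __u8x8 would zero-extend instead). */
static i16x8 widen_low_i8x16(i8x16 a) {
  i8x8 low = {a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7]};
  return __builtin_convertvector(low, i16x8);
}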
diff --git a/linux-x86/lib64/clang/11.0.5/include/wbnoinvdintrin.h b/linux-x86/lib64/clang/12.0.1/include/wbnoinvdintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/wbnoinvdintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/wbnoinvdintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/wmmintrin.h b/linux-x86/lib64/clang/12.0.1/include/wmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/wmmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/wmmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/x86intrin.h b/linux-x86/lib64/clang/12.0.1/include/x86intrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/x86intrin.h
rename to linux-x86/lib64/clang/12.0.1/include/x86intrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/xmmintrin.h b/linux-x86/lib64/clang/12.0.1/include/xmmintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/xmmintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/xmmintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/xopintrin.h b/linux-x86/lib64/clang/12.0.1/include/xopintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/xopintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/xopintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/xsavecintrin.h b/linux-x86/lib64/clang/12.0.1/include/xsavecintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/xsavecintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/xsavecintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/xsaveintrin.h b/linux-x86/lib64/clang/12.0.1/include/xsaveintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/xsaveintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/xsaveintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/xsaveoptintrin.h b/linux-x86/lib64/clang/12.0.1/include/xsaveoptintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/xsaveoptintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/xsaveoptintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/xsavesintrin.h b/linux-x86/lib64/clang/12.0.1/include/xsavesintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/xsavesintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/xsavesintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/include/xtestintrin.h b/linux-x86/lib64/clang/12.0.1/include/xtestintrin.h
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/include/xtestintrin.h
rename to linux-x86/lib64/clang/12.0.1/include/xtestintrin.h
diff --git a/linux-x86/lib64/clang/11.0.5/share/asan_blacklist.txt b/linux-x86/lib64/clang/12.0.1/share/asan_blacklist.txt
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/share/asan_blacklist.txt
rename to linux-x86/lib64/clang/12.0.1/share/asan_blacklist.txt
diff --git a/linux-x86/lib64/clang/11.0.5/share/cfi_blacklist.txt b/linux-x86/lib64/clang/12.0.1/share/cfi_blacklist.txt
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/share/cfi_blacklist.txt
rename to linux-x86/lib64/clang/12.0.1/share/cfi_blacklist.txt
diff --git a/linux-x86/lib64/clang/11.0.5/share/dfsan_abilist.txt b/linux-x86/lib64/clang/12.0.1/share/dfsan_abilist.txt
similarity index 99%
rename from linux-x86/lib64/clang/11.0.5/share/dfsan_abilist.txt
rename to linux-x86/lib64/clang/12.0.1/share/dfsan_abilist.txt
index b2021b7..55447ec 100644
--- a/linux-x86/lib64/clang/11.0.5/share/dfsan_abilist.txt
+++ b/linux-x86/lib64/clang/12.0.1/share/dfsan_abilist.txt
@@ -183,6 +183,7 @@
 
 # Functions that produce an output that is computed from the input, but is not
 # necessarily data dependent.
+fun:bcmp=custom
 fun:memchr=custom
 fun:memcmp=custom
 fun:strcasecmp=custom
@@ -191,6 +192,7 @@
 fun:strlen=custom
 fun:strncasecmp=custom
 fun:strncmp=custom
+fun:strpbrk=custom
 fun:strrchr=custom
 fun:strstr=custom
 
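The two new dfsan_abilist.txt entries mark bcmp and strpbrk as "custom", meaning DataFlowSanitizer routes calls through hand-written wrappers that propagate taint labels explicitly rather than treating the callees as opaque. By DFSan convention such a wrapper is named __dfsw_<function> and receives one dfsan_label per argument plus an out-parameter for the return-value label. The sketch below shows the expected shape for bcmp; it is illustrative only (the real wrapper lives in compiler-rt, and a precise one would union labels only up to the first differing byte):

#include <sanitizer/dfsan_interface.h>
#include <stddef.h>
#include <strings.h>

int __dfsw_bcmp(const void *a, const void *b, size_t n,
                dfsan_label a_label, dfsan_label b_label, dfsan_label n_label,
                dfsan_label *ret_label) {
  int res = bcmp(a, b, n); /* the result depends on the compared bytes */
  /* Coarse propagation: taint the return value with every byte read. */
  *ret_label = dfsan_union(dfsan_read_label(a, n), dfsan_read_label(b, n));
  return res;
}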
diff --git a/linux-x86/lib64/clang/11.0.5/share/hwasan_blacklist.txt b/linux-x86/lib64/clang/12.0.1/share/hwasan_blacklist.txt
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/share/hwasan_blacklist.txt
rename to linux-x86/lib64/clang/12.0.1/share/hwasan_blacklist.txt
diff --git a/linux-x86/lib64/clang/11.0.5/share/msan_blacklist.txt b/linux-x86/lib64/clang/12.0.1/share/msan_blacklist.txt
similarity index 100%
rename from linux-x86/lib64/clang/11.0.5/share/msan_blacklist.txt
rename to linux-x86/lib64/clang/12.0.1/share/msan_blacklist.txt
diff --git a/linux-x86/lib64/libLLVM-11git.so b/linux-x86/lib64/libLLVM-11git.so
deleted file mode 100644
index 938702b..0000000
--- a/linux-x86/lib64/libLLVM-11git.so
+++ /dev/null
Binary files differ
diff --git a/linux-x86/lib64/libLLVM-12git.so b/linux-x86/lib64/libLLVM-12git.so
new file mode 100644
index 0000000..ccb3cc4
--- /dev/null
+++ b/linux-x86/lib64/libLLVM-12git.so
Binary files differ
diff --git a/linux-x86/lib64/libbase.so b/linux-x86/lib64/libbase.so
index 689d2c0..c8064e4 100755
--- a/linux-x86/lib64/libbase.so
+++ b/linux-x86/lib64/libbase.so
Binary files differ
diff --git a/linux-x86/lib64/libc++.so b/linux-x86/lib64/libc++.so
index fe9c13f..89ceed3 100755
--- a/linux-x86/lib64/libc++.so
+++ b/linux-x86/lib64/libc++.so
Binary files differ
diff --git a/linux-x86/lib64/libc++.so.1 b/linux-x86/lib64/libc++.so.1
index 7cc5763..e3ca474 100644
--- a/linux-x86/lib64/libc++.so.1
+++ b/linux-x86/lib64/libc++.so.1
Binary files differ
diff --git a/linux-x86/lib64/libclang_cxx.so.11git b/linux-x86/lib64/libclang_cxx.so.11git
deleted file mode 100644
index ab7f681..0000000
--- a/linux-x86/lib64/libclang_cxx.so.11git
+++ /dev/null
Binary files differ
diff --git a/linux-x86/lib64/libclang_cxx.so.12git b/linux-x86/lib64/libclang_cxx.so.12git
new file mode 100644
index 0000000..3f082ac
--- /dev/null
+++ b/linux-x86/lib64/libclang_cxx.so.12git
Binary files differ
diff --git a/linux-x86/lib64/liblog.so b/linux-x86/lib64/liblog.so
index ffb9a80..6b2e90b 100755
--- a/linux-x86/lib64/liblog.so
+++ b/linux-x86/lib64/liblog.so
Binary files differ
diff --git a/linux-x86/lib64/libprotobuf-cpp-full.so b/linux-x86/lib64/libprotobuf-cpp-full.so
index 05e00f5..a396352 100755
--- a/linux-x86/lib64/libprotobuf-cpp-full.so
+++ b/linux-x86/lib64/libprotobuf-cpp-full.so
Binary files differ
diff --git a/linux-x86/lib64/libxml2.so.2.9.10 b/linux-x86/lib64/libxml2.so.2.9.10
index 9c2b702..363bec0 100644
--- a/linux-x86/lib64/libxml2.so.2.9.10
+++ b/linux-x86/lib64/libxml2.so.2.9.10
Binary files differ
diff --git a/linux-x86/lib64/libz-host.so b/linux-x86/lib64/libz-host.so
index 3f20793..75d42c2 100755
--- a/linux-x86/lib64/libz-host.so
+++ b/linux-x86/lib64/libz-host.so
Binary files differ
diff --git a/linux-x86/lib64/libziparchive.so b/linux-x86/lib64/libziparchive.so
index b700e96..a532aa7 100755
--- a/linux-x86/lib64/libziparchive.so
+++ b/linux-x86/lib64/libziparchive.so
Binary files differ
diff --git a/manifest.xml b/manifest.xml
index ec0c625..9bf55c0 100644
--- a/manifest.xml
+++ b/manifest.xml
@@ -1,125 +1,127 @@
 <?xml version='1.0' encoding='UTF-8'?>
 
 <manifest>
-  <remote fetch=".." name="aosp" review="https://android-review.googlesource.com/" />
+  <remote fetch="https://android-review.googlesource.com/" name="aosp" review="https://android-review.googlesource.com/" />
 
   <default remote="aosp" revision="master" />
 
-  <project name="platform/development" path="development" revision="8d6aeab88209798a45b03f7e52c85bc6665b510b" />
+  <project name="platform/development" path="development" revision="5b757feed9a17412d8b02911d92939d17e404339" />
 
-  <project clone-depth="1" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" revision="70a8d2affe5a31cbb6ae03469a83981b3331c9a4" />
+  <project clone-depth="1" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8" revision="7568ee7fc921690e303cbf7088f18e6f5cfaaba3" />
 
   <project name="platform/external/go-creachadair-stringset" path="external/go-creachadair-stringset" revision="580fe893c0cd9bc1cb41a735b6ff456901f00e83" />
 
-  <project name="platform/build/soong" path="build/soong" revision="70bc5454079dd5a99fa5342990683524b327e954">
+  <project name="platform/build/soong" path="build/soong" revision="016370b1466e16d69e6c8a59967b5361848a7da7">
     <linkfile dest="Android.bp" src="root.bp" />
 
     <linkfile dest="bootstrap.bash" src="bootstrap.bash" />
 </project>
 
-  <project name="platform/external/ninja" path="external/ninja" revision="4b3e54d29302e0accfab963fc1af21b2d44771d1" />
+  <project name="platform/external/ninja" path="external/ninja" revision="64665f0233bcdbbb9c9dc5b0f441a7c1fa11137c" />
 
-  <project name="platform/build/blueprint" path="build/blueprint" revision="e0118ca809f8c533c3c6423585545360c88ed4da" />
+  <project name="platform/build/blueprint" path="build/blueprint" revision="52176e98518be598f45b6ec278d5c392a5a43acb" />
 
   <project name="platform/external/rapidjson" path="external/rapidjson" revision="9fa2a3d9e356a1f42a6184dcf1e0508ddfa9dbfb" />
 
-  <project name="platform/external/jsoncpp" path="external/jsoncpp" revision="f5b9450d34b337a4620fcd1e8099782139cfbc70" />
+  <project name="platform/external/jsoncpp" path="external/jsoncpp" revision="2e9e481ad2300b7aaa0583c9be25dc284889a7bf" />
 
-  <project name="platform/external/googletest" path="external/googletest" revision="ee3fca52fa6409aacab73bf519633d21a3a3a738" />
+  <project name="platform/external/googletest" path="external/googletest" revision="f93adba5c98356dfee63567b65261161c219ad93" />
 
   <project name="platform/external/regex-re2" path="external/regex-re2" revision="84e28962b2c2f357b5daccb460501b169193fafe" />
 
-  <project clone-depth="1" name="platform/prebuilts/build-tools" path="prebuilts/build-tools" revision="48542e4070ba33560f29ee5175a6f8490d11633c" />
+  <project clone-depth="1" name="platform/prebuilts/build-tools" path="prebuilts/build-tools" revision="79299d14bbe7041e982afee4ed6aa383e2b9a0aa" />
 
-  <project name="platform/bionic" path="bionic" revision="08a43426a0bcbc53cd2a33b3624bc6b3f7176117" />
+  <project name="platform/bionic" path="bionic" revision="4e67866510aa27445e9d699ed40d55163b66e95f" />
 
-  <project clone-depth="1" groups="linux" name="platform/prebuilts/go/linux-x86" path="prebuilts/go/linux-x86" revision="fa2108d8e617e4f7d2585c27e18c09e65cd73ff8" />
+  <project clone-depth="1" groups="linux" name="platform/prebuilts/go/linux-x86" path="prebuilts/go/linux-x86" revision="0f0f207a3a947fe60986dfcfa17ae178d4caf2af" />
 
-  <project name="platform/external/zlib" path="external/zlib" revision="28b8c108faa203d7f3675e2fa40fa0c11239c0a7" />
+  <project name="platform/external/zlib" path="external/zlib" revision="4fa66caa136303294cabb33a634c6ccf18926280" />
 
   <project name="platform/external/golang-x-tools" path="external/golang-x-tools" revision="bcc6484babb4e999e4bdb6a982bfa7e92fc640d2" />
 
-  <project name="platform/external/zopfli" path="external/zopfli" revision="3c7853623a3fd85d952b5e4180c800b04c09669e" />
+  <project name="platform/external/zopfli" path="external/zopfli" revision="a21b2e37625f379da4d38e952ae1e6c8ad3d61c8" />
 
   <project name="platform/external/go-cmp" path="external/go-cmp" revision="0f7c828c80e325c9fc2af078ffb108362bb84c15" />
 
-  <project clone-depth="1" groups="darwin" name="platform/prebuilts/go/darwin-x86" path="prebuilts/go/darwin-x86" revision="5367c0b6f8ef3536becae14b3f3713390372abfa" />
+  <project clone-depth="1" groups="darwin" name="platform/prebuilts/go/darwin-x86" path="prebuilts/go/darwin-x86" revision="de3ce1c2db7f5518195c77d90a6c3f1ca6cc0127" />
 
-  <project name="platform/system/libbase" path="system/libbase" revision="f927d5ca7e028eff433e068fd3c2f2954b5b00cf" />
+  <project name="platform/system/libbase" path="system/libbase" revision="be15867b32f675c5677ef6be9e4d8f270552b44a" />
 
-  <project clone-depth="1" name="platform/prebuilts/jdk/jdk11" path="prebuilts/jdk/jdk11" revision="671f6a7cab9a313c08d1925b46264786c93feb5f" />
+  <project clone-depth="1" name="platform/prebuilts/jdk/jdk11" path="prebuilts/jdk/jdk11" revision="aac7c9dfe35464cb37b422d2691fb472bb555652" />
 
   <project name="platform/external/kythe" path="external/kythe" revision="9e960add0dc4d3798329897313793ee3d46dff3f" />
 
-  <project name="platform/external/libcxx" path="external/libcxx" revision="9057c2f748e6005253cab91cc0e98d2a00d41acc" />
+  <project name="platform/external/libcxx" path="external/libcxx" revision="a23eef8375ab8e8d66833668486a50f1fe47de4a" />
 
-  <project groups="pdk" name="platform/external/boringssl" path="external/boringssl" revision="ec522029127e610390f9a469dc135cf12495718c" />
+  <project groups="pdk" name="platform/external/boringssl" path="external/boringssl" revision="16f7c57a20446b2f2ae0e31600e86b14f3f58dd1" />
 
   <project clone-depth="1" groups="linux" name="platform/prebuilts/ninja/linux-x86" path="prebuilts/ninja/linux-x86" revision="6369b19fc3fbe765636af75d394627e2b92599ed" />
 
-  <project name="platform/external/clang" path="external/clang" revision="10d160c2c39d0b0f33f9be135c4dc6349e996eb8" />
+  <project name="platform/external/clang" path="external/clang" revision="e5470fe919b651da3b226e88c24e51faec68ceb5" />
 
-  <project name="platform/external/libcxxabi" path="external/libcxxabi" revision="3364b1d8fc2d3447d44d9ae44e0eeb8fd4107fba" />
+  <project name="platform/external/libcxxabi" path="external/libcxxabi" revision="4b3634b5f8131cf59b3670148079ab1e3622b413" />
 
   <project clone-depth="1" name="platform/prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8" path="prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8" revision="d9aafaade740ca38612c742f6d87debf362132ea" />
 
-  <project name="platform/build/kati" path="build/kati" revision="5125da301ca0a1680c26b5d9f574dc174fdfa98c" />
+  <project name="platform/system/logging" path="system/logging" revision="2e329310ed8c9185a9fe76ead8e88a49f9d4df0c" />
+
+  <project name="platform/build/kati" path="build/kati" revision="577d56770bcc18f3401a3e712df3b777ec63b291" />
 
   <project clone-depth="1" groups="darwin" name="platform/prebuilts/gcc/darwin-x86/host/headers" path="prebuilts/gcc/darwin-x86/host/headers" revision="4ac4f7cc41cf3c9e36fc3d6cf37fd1cfa9587a68" />
 
-  <project name="platform/system/libziparchive" path="system/libziparchive" revision="5357e00bea027e10fffdcf94e70054f1d77c20b0" />
+  <project name="platform/system/libziparchive" path="system/libziparchive" revision="07fc7e6e4649970f5b1ea80e095dcdbd4863d74b" />
 
-  <project name="platform/external/libunwind" path="external/libunwind" revision="a3af76adb4715dd6e2f40cc7f165e52670a72a9a" />
+  <project name="platform/external/libunwind" path="external/libunwind" revision="30ef568c2bd147257522e9f424b1e86c4536895e" />
 
-  <project name="platform/system/core" path="system/core" revision="3f452134da2a4d4578b46a254f41dbd008a72f82" />
+  <project name="platform/system/core" path="system/core" revision="942e5eef44a96c6f63b3fb5efe22afdc0a7095ef" />
 
-  <project clone-depth="1" groups="linux" name="platform/prebuilts/clang/host/linux-x86" path="prebuilts/clang/host/linux-x86" revision="28187863afd843d34a8595017d509f0caa487c93" />
+  <project clone-depth="1" groups="linux" name="platform/prebuilts/clang/host/linux-x86" path="prebuilts/clang/host/linux-x86" revision="fcce3268397ce68ee36f9c52f3a55630897328b3" />
 
-  <project clone-depth="3" name="platform/prebuilts/clang-tools" path="prebuilts/clang-tools" revision="75439a9e6bf9ae45ff41bb250b2b59d34ec3efe5" />
+  <project clone-depth="3" name="platform/prebuilts/clang-tools" path="prebuilts/clang-tools" revision="3555e8a0e7c1180da3429e9811c9afbd7cb4c2fd" />
 
-  <project name="platform/external/golang-protobuf" path="external/golang-protobuf" revision="5c0a66170926160524118a1b5d5e5bd6d697898e" />
+  <project name="platform/external/golang-protobuf" path="external/golang-protobuf" revision="3f39cb7711bee92f34207510c257118149ed2508" />
 
-  <project clone-depth="1" name="platform/prebuilts/jdk/jdk9" path="prebuilts/jdk/jdk9" revision="b113c3808dea0cc1ef161e235f42705592971f82" />
+  <project clone-depth="1" name="platform/prebuilts/jdk/jdk9" path="prebuilts/jdk/jdk9" revision="0f38929e89d8a7bb9ef55f32666f242e31d4802d" />
 
   <project clone-depth="1" groups="darwin" name="platform/prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" path="prebuilts/gcc/darwin-x86/host/i686-apple-darwin-4.2.1" revision="353332b8c310174c2dff55db520349cff40b1295" />
 
-  <project name="platform/external/python/cpython3" path="external/python/cpython3" revision="cb2888033cf654029351044bd4392fc330f319e5" />
+  <project name="platform/external/python/cpython3" path="external/python/cpython3" revision="e6b5c0a3f5322a61b0a4ccc7688483006d67012a" />
 
-  <project name="platform/external/compiler-rt" path="external/compiler-rt" revision="e1df04d40038fc940d1a35f8bc3145f91c014908" />
+  <project name="platform/external/compiler-rt" path="external/compiler-rt" revision="65c164706bc4cc5deb780f83cccfd12228b50393" />
 
   <project name="platform/external/golang-x-sync" path="external/golang-x-sync" revision="6ec98728804a67c2ee80bbfffae3233a5b46168d" />
 
   <project name="platform/external/go-creachadair-shell" path="external/go-creachadair-shell" revision="97d772a0b6c62d6bfac71224c4fdbf896a5478ba" />
 
-  <project name="platform/external/libunwind_llvm" path="external/libunwind_llvm" revision="5011a847022edf4c12a2dd20f827374abc93d51f" />
+  <project name="platform/external/libunwind_llvm" path="external/libunwind_llvm" revision="4b1bd5d6804c250d6f6724ea799cbf7f2994da2a" />
 
-  <project name="platform/external/fmtlib" path="external/fmtlib" revision="61708eeb46d540d7b31944be926ef3c9b5123496" />
+  <project name="platform/external/fmtlib" path="external/fmtlib" revision="a3306f2cbe69a421549b592970dd61dd61a8c54b" />
 
-  <project name="platform/external/llvm" path="external/llvm" revision="d48ce923680e694bbb884ecc7d13a0d1725d2061" />
+  <project name="platform/external/llvm" path="external/llvm" revision="d845b48987996a27ce12ba697fb208c34340f582" />
 
-  <project groups="pdk" name="platform/external/gflags" path="external/gflags" revision="f3c8cc25b57a8161128d8c2bb4cc392226d4a72b" />
+  <project groups="pdk" name="platform/external/gflags" path="external/gflags" revision="9cfee34507cd80ce6d24a56be304f14ac74299d1" />
 
-  <project name="platform/build" path="build/make" revision="2f171ac0a5586fa9f3dd7859d542c64a185228ca">
+  <project name="platform/build" path="build/make" revision="4e98e687ec71fe7e62dd6c11454febaa190e3db0">
     <linkfile dest="build/tools" src="tools" />
 </project>
 
-  <project clone-depth="1" name="platform/prebuilts/misc" path="prebuilts/misc" revision="01cc4c919daa1ad203ba64a4c54a293399820166" />
+  <project clone-depth="1" name="platform/prebuilts/misc" path="prebuilts/misc" revision="a4777595c3718e29f47c31d60c830cea2cfabb1a" />
 
   <project name="platform/external/go-subcommands" path="external/go-subcommands" revision="409adebbdb103fb745f56343d7345b57c8ea13aa" />
 
-  <project clone-depth="1" groups="darwin" name="platform/prebuilts/clang/host/darwin-x86" path="prebuilts/clang/host/darwin-x86" revision="0bd4f978e2f563979a873cac2126a4090bb60e67" />
+  <project clone-depth="1" groups="darwin" name="platform/prebuilts/clang/host/darwin-x86" path="prebuilts/clang/host/darwin-x86" revision="81a459c97651f457ada8b67ba21162cd9cded54d" />
 
-  <project name="platform/external/protobuf" path="external/protobuf" revision="43e6f74c3dd6bb1f6e97111e31206f8cb8c7a438" />
+  <project name="platform/external/protobuf" path="external/protobuf" revision="b4330cc2d8edadaf529aeade52c261d95d1ee35d" />
 
   <project clone-depth="1" name="platform/prebuilts/jdk/jdk8" path="prebuilts/jdk/jdk8" revision="74e4f1844dfa9b8df9e0fe2ff34a2ecc24d52b07" />
 
   <project clone-depth="1" groups="darwin" name="platform/prebuilts/ninja/darwin-x86" path="prebuilts/ninja/darwin-x86" revision="00f798346dedb4a7a6a6dcc9ad32ff09d66ee0db" />
 
-  <project name="platform/external/python/cpython2" path="external/python/cpython2" revision="30b69e13b4c8f6273523290bf1724784bab8ed55" />
+  <project name="platform/external/python/cpython2" path="external/python/cpython2" revision="ebadebeec5415918b68cc6a6569df23c51f63bd7" />
 
-  <project name="platform/dalvik" path="dalvik" revision="f73f651cea3087424db1179cf72a7b67ff537715" />
+  <project name="platform/dalvik" path="dalvik" revision="35e485b68acd8742901bad5625c4b73a715ed6b8" />
 
-  <project name="platform/external/abseil-cpp" path="external/abseil-cpp" revision="2b33f4af7208b9b14d18c001e8175aae97afa7ed" />
+  <project name="platform/external/abseil-cpp" path="external/abseil-cpp" revision="fd30bf9a4b179e3206c3f3d2c8e54e4b15bd3866" />
 
   <project name="platform/external/go-etree" path="external/go-etree" revision="7fa46d9c9eb9134443a7fc56a354f07b15fc3a76" />
 </manifest>